/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PF_INET protocol family socket handler.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *              piggy,
 *              Karl Knutson    :       Socket protocol table
 *              A.N.Kuznetsov   :       Socket death error in accept().
 *              John Richardson :       Fix non blocking error in connect()
 *                                      so sockets that fail to connect
 *                                      don't return -EINPROGRESS.
 *              Alan Cox        :       Asynchronous I/O support
 *              Alan Cox        :       Keep correct socket pointer on sock
 *                                      structures when accept()ed
 *              Alan Cox        :       Semantics of SO_LINGER aren't state
 *                                      moved to close when you look carefully.
 *                                      With this fixed and the accept bug fixed
 *                                      some RPC stuff seems happier.
 *              Niibe Yutaka    :       4.4BSD style write async I/O
 *              Alan Cox,
 *              Tony Gale       :       Fixed reuse semantics.
 *              Alan Cox        :       bind() shouldn't abort existing but dead
 *                                      sockets. Stops FTP netin:.. I hope.
 *              Alan Cox        :       bind() works correctly for RAW sockets.
 *                                      Note that FreeBSD at least was broken
 *                                      in this respect so be careful with
 *                                      compatibility tests...
 *              Alan Cox        :       routing cache support
 *              Alan Cox        :       memzero the socket structure for
 *                                      compactness.
 *              Matt Day        :       nonblock connect error handler
 *              Alan Cox        :       Allow large numbers of pending sockets
 *                                      (eg for big web sites), but only if
 *                                      specifically application requested.
 *              Alan Cox        :       New buffering throughout IP. Used
 *                                      dumbly.
 *              Alan Cox        :       New buffering now used smartly.
 *              Alan Cox        :       BSD rather than common sense
 *                                      interpretation of listen.
 *              Germano Caronni :       Assorted small races.
 *              Alan Cox        :       sendmsg/recvmsg basic support.
 *              Alan Cox        :       Only sendmsg/recvmsg now supported.
 *              Alan Cox        :       Locked down bind (see security list).
 *              Alan Cox        :       Loosened bind a little.
 *              Mike McLagan    :       ADD/DEL DLCI Ioctls
 *              Willy Konynenberg :     Transparent proxying support.
 *              David S. Miller :       New socket lookup architecture.
 *                                      Some other random speedups.
 *              Cyrus Durgin    :       Cleaned up file for kmod hacks.
 *              Andi Kleen      :       Fix inet_stream_connect TCP race.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif


/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);

/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);

        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_error_queue);

        sk_mem_reclaim(sk);

        if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
                pr_err("Attempt to release TCP socket in state %d %p\n",
                       sk->sk_state, sk);
                return;
        }
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive inet socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);

        kfree(rcu_dereference_protected(inet->inet_opt, 1));
        dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
        dst_release(sk->sk_rx_dst);
        sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);

/*
 * The routines beyond this point handle the behaviour of an AF_INET
 * socket object. Mostly it punts to the subprotocols of IP to do
 * the work.
 */

/*
 * Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
        struct inet_sock *inet;
        /* We may need to bind the socket. */
        lock_sock(sk);
        inet = inet_sk(sk);
        if (!inet->inet_num) {
                if (sk->sk_prot->get_port(sk, 0)) {
                        release_sock(sk);
                        return -EAGAIN;
                }
                inet->inet_sport = htons(inet->inet_num);
        }
        release_sock(sk);
        return 0;
}

/*
 * Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
                /* Check special setups for testing purposes to enable TFO
                 * without requiring the TCP_FASTOPEN sockopt.
                 * Note that only TCP sockets (SOCK_STREAM) will reach here.
                 * Also fastopenq may already have been allocated because this
                 * socket was in TCP_LISTEN state previously but was
                 * shutdown() (rather than close()).
                 */
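                /* Note (from the code below): the low bits of the
                 * tcp_fastopen sysctl carry the TFO_* enable flags, while
                 * TFO_SERVER_WO_SOCKOPT2 takes the server queue length
                 * from the sysctl's upper 16 bits.
                 */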
                if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
                    inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
                        if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
                                err = fastopen_init_queue(sk, backlog);
                        else if ((sysctl_tcp_fastopen &
                                  TFO_SERVER_WO_SOCKOPT2) != 0)
                                err = fastopen_init_queue(sk,
                                    ((uint)sysctl_tcp_fastopen) >> 16);
                        else
                                err = 0;
                        if (err)
                                goto out;
                }
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}
EXPORT_SYMBOL(inet_listen);

/*
 *      Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        struct sock *sk;
        struct inet_protosw *answer;
        struct inet_sock *inet;
        struct proto *answer_prot;
        unsigned char answer_flags;
        char answer_no_check;
        int try_loading_module = 0;
        int err;

        sock->state = SS_UNCONNECTED;

        /* Look for the requested type/protocol pair. */
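        /* e.g. socket(AF_INET, SOCK_STREAM, 0) arrives here with protocol
         * IPPROTO_IP (0), which acts as a wildcard and is resolved below to
         * the first registered entry for the type (TCP for SOCK_STREAM).
         */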
lookup_protocol:
        err = -ESOCKTNOSUPPORT;
        rcu_read_lock();
        list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {

                err = 0;
                /* Check the non-wild match. */
                if (protocol == answer->protocol) {
                        if (protocol != IPPROTO_IP)
                                break;
                } else {
                        /* Check for the two wild cases. */
                        if (IPPROTO_IP == protocol) {
                                protocol = answer->protocol;
                                break;
                        }
                        if (IPPROTO_IP == answer->protocol)
                                break;
                }
                err = -EPROTONOSUPPORT;
        }

        if (unlikely(err)) {
                if (try_loading_module < 2) {
                        rcu_read_unlock();
                        /*
                         * Be more specific, e.g. net-pf-2-proto-132-type-1
                         * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
                         */
                        if (++try_loading_module == 1)
                                request_module("net-pf-%d-proto-%d-type-%d",
                                               PF_INET, protocol, sock->type);
                        /*
                         * Fall back to generic, e.g. net-pf-2-proto-132
                         * (net-pf-PF_INET-proto-IPPROTO_SCTP)
                         */
                        else
                                request_module("net-pf-%d-proto-%d",
                                               PF_INET, protocol);
                        goto lookup_protocol;
                } else
                        goto out_rcu_unlock;
        }

        err = -EPERM;
        if (sock->type == SOCK_RAW && !kern &&
            !ns_capable(net->user_ns, CAP_NET_RAW))
                goto out_rcu_unlock;

        sock->ops = answer->ops;
        answer_prot = answer->prot;
        answer_no_check = answer->no_check;
        answer_flags = answer->flags;
        rcu_read_unlock();

        WARN_ON(answer_prot->slab == NULL);

        err = -ENOBUFS;
        sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
        if (sk == NULL)
                goto out;

        err = 0;
        sk->sk_no_check = answer_no_check;
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;

        inet = inet_sk(sk);
        inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;

        inet->nodefrag = 0;

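        /* Note: for SOCK_RAW the protocol number doubles as the local
         * "port" (inet_num) used for socket lookup, and IPPROTO_RAW
         * additionally implies IP_HDRINCL, i.e. the sender supplies the
         * IP header itself.
         */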
        if (SOCK_RAW == sock->type) {
                inet->inet_num = protocol;
                if (IPPROTO_RAW == protocol)
                        inet->hdrincl = 1;
        }

        if (net->ipv4.sysctl_ip_no_pmtu_disc)
                inet->pmtudisc = IP_PMTUDISC_DONT;
        else
                inet->pmtudisc = IP_PMTUDISC_WANT;

        inet->inet_id = 0;

        sock_init_data(sock, sk);

        sk->sk_destruct = inet_sock_destruct;
        sk->sk_protocol = protocol;
        sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;

        inet->uc_ttl = -1;
        inet->mc_loop = 1;
        inet->mc_ttl = 1;
        inet->mc_all = 1;
        inet->mc_index = 0;
        inet->mc_list = NULL;
        inet->rcv_tos = 0;

        sk_refcnt_debug_inc(sk);

        if (inet->inet_num) {
                /* It assumes that any protocol which allows
                 * the user to assign a number at socket
                 * creation time automatically
                 * shares.
                 */
                inet->inet_sport = htons(inet->inet_num);
                /* Add to protocol hash chains. */
                sk->sk_prot->hash(sk);
        }

        if (sk->sk_prot->init) {
                err = sk->sk_prot->init(sk);
                if (err)
                        sk_common_release(sk);
        }
out:
        return err;
out_rcu_unlock:
        rcu_read_unlock();
        goto out;
}


/*
 * The peer socket should always be NULL (or else). When we call this
 * function we are destroying the object and from then on nobody
 * should refer to it.
 */
int inet_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (sk) {
                long timeout;

                sock_rps_reset_flow(sk);

                /* Applications forget to leave groups before exiting */
                ip_mc_drop_socket(sk);

                /* If linger is set, we don't return until the close
                 * is complete.  Otherwise we return immediately. The
                 * actual closing is done the same either way.
                 *
                 * If the close is due to the process exiting, we never
                 * linger..
                 */
                timeout = 0;
                if (sock_flag(sk, SOCK_LINGER) &&
                    !(current->flags & PF_EXITING))
                        timeout = sk->sk_lingertime;
                sock->sk = NULL;
                sk->sk_prot->close(sk, timeout);
        }
        return 0;
}
EXPORT_SYMBOL(inet_release);

/* It is off by default, see below. */
int sysctl_ip_nonlocal_bind __read_mostly;
EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);

int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        unsigned short snum;
        int chk_addr_ret;
        int err;

        /* If the socket has its own bind function then use it. (RAW) */
        if (sk->sk_prot->bind) {
                err = sk->sk_prot->bind(sk, uaddr, addr_len);
                goto out;
        }
        err = -EINVAL;
        if (addr_len < sizeof(struct sockaddr_in))
                goto out;

        if (addr->sin_family != AF_INET) {
                /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
                 * only if s_addr is INADDR_ANY.
                 */
                err = -EAFNOSUPPORT;
                if (addr->sin_family != AF_UNSPEC ||
                    addr->sin_addr.s_addr != htonl(INADDR_ANY))
                        goto out;
        }

        chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);

        /* Not specified by any standard per se, however it breaks too
         * many applications when removed. It is unfortunate since
         * allowing applications to make a non-local bind solves
         * several problems with systems using dynamic addressing.
         * (i.e. your servers still start up even if your ISDN link
         * is temporarily down)
         */
        err = -EADDRNOTAVAIL;
        if (!sysctl_ip_nonlocal_bind &&
            !(inet->freebind || inet->transparent) &&
            addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
            chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST &&
            chk_addr_ret != RTN_BROADCAST)
                goto out;

        snum = ntohs(addr->sin_port);
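        /* Ports below PROT_SOCK (1024) are privileged; binding to one
         * requires CAP_NET_BIND_SERVICE in the socket's user namespace.
         */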
        err = -EACCES;
        if (snum && snum < PROT_SOCK &&
            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
                goto out;

        /* We keep a pair of addresses. rcv_saddr is the one
         * used by hash lookups, and saddr is used for transmit.
         *
         * In the BSD API these are the same except where it
         * would be illegal to use them (multicast/broadcast) in
         * which case the sending device address is used.
         */
        lock_sock(sk);

        /* Check these errors (active socket, double bind). */
        err = -EINVAL;
        if (sk->sk_state != TCP_CLOSE || inet->inet_num)
                goto out_release_sock;

        inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */

        /* Make sure we are allowed to bind here. */
        if (sk->sk_prot->get_port(sk, snum)) {
                inet->inet_saddr = inet->inet_rcv_saddr = 0;
                err = -EADDRINUSE;
                goto out_release_sock;
        }

        if (inet->inet_rcv_saddr)
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
        inet->inet_sport = htons(inet->inet_num);
        inet->inet_daddr = 0;
        inet->inet_dport = 0;
        sk_dst_reset(sk);
        err = 0;
out_release_sock:
        release_sock(sk);
out:
        return err;
}
EXPORT_SYMBOL(inet_bind);

int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags)
{
        struct sock *sk = sock->sk;

        if (addr_len < sizeof(uaddr->sa_family))
                return -EINVAL;
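        /* By BSD/POSIX convention, connect() with an AF_UNSPEC address
         * dissolves any existing association on a datagram socket.
         */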
        if (uaddr->sa_family == AF_UNSPEC)
                return sk->sk_prot->disconnect(sk, flags);

        if (!inet_sk(sk)->inet_num && inet_autobind(sk))
                return -EAGAIN;
        return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);

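/* Sleep until the handshake finishes, the timeout expires or a signal
 * arrives. writebias is added to sk->sk_write_pending for the duration
 * of the wait; the caller passes a nonzero bias for Fast Open connects
 * that already have data queued.
 */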
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk->sk_write_pending += writebias;

        /* Basic assumption: if someone sets sk->sk_err, he _must_
         * change the state of the socket from TCP_SYN_*.
         * Connect() does not allow one to get error notifications
         * without closing the socket.
         */
        while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                if (signal_pending(current) || !timeo)
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
        finish_wait(sk_sleep(sk), &wait);
        sk->sk_write_pending -= writebias;
        return timeo;
}

/*
 * Connect to a remote host. There is regrettably still a little
 * TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                          int addr_len, int flags)
{
        struct sock *sk = sock->sk;
        int err;
        long timeo;

        if (addr_len < sizeof(uaddr->sa_family))
                return -EINVAL;

        if (uaddr->sa_family == AF_UNSPEC) {
                err = sk->sk_prot->disconnect(sk, flags);
                sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
                goto out;
        }

        switch (sock->state) {
        default:
                err = -EINVAL;
                goto out;
        case SS_CONNECTED:
                err = -EISCONN;
                goto out;
        case SS_CONNECTING:
                err = -EALREADY;
                /* Fall out of switch with err, set for this state */
                break;
        case SS_UNCONNECTED:
                err = -EISCONN;
                if (sk->sk_state != TCP_CLOSE)
                        goto out;

                err = sk->sk_prot->connect(sk, uaddr, addr_len);
                if (err < 0)
                        goto out;

                sock->state = SS_CONNECTING;

                /* Just entered SS_CONNECTING state; the only
                 * difference is that return value in non-blocking
                 * case is EINPROGRESS, rather than EALREADY.
                 */
                err = -EINPROGRESS;
                break;
        }

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
                                tcp_sk(sk)->fastopen_req &&
                                tcp_sk(sk)->fastopen_req->data ? 1 : 0;

                /* Error code is set above */
                if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
                        goto out;

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
        }

        /* Connection was closed by RST, timeout, ICMP error
         * or another process disconnected us.
         */
        if (sk->sk_state == TCP_CLOSE)
                goto sock_error;

        /* sk->sk_err may not be zero now, if RECVERR was ordered by user
         * and error was received after socket entered established state.
         * Hence, it is handled normally after connect() returns successfully.
         */

        sock->state = SS_CONNECTED;
        err = 0;
out:
        return err;

sock_error:
        err = sock_error(sk) ? : -ECONNABORTED;
        sock->state = SS_UNCONNECTED;
        if (sk->sk_prot->disconnect(sk, flags))
                sock->state = SS_DISCONNECTING;
        goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);

int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                        int addr_len, int flags)
{
        int err;

        lock_sock(sock->sk);
        err = __inet_stream_connect(sock, uaddr, addr_len, flags);
        release_sock(sock->sk);
        return err;
}
EXPORT_SYMBOL(inet_stream_connect);

/*
 *      Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock, int flags)
{
        struct sock *sk1 = sock->sk;
        int err = -EINVAL;
        struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);

        if (!sk2)
                goto do_err;

        lock_sock(sk2);

        sock_rps_record_flow(sk2);
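        /* A freshly accepted socket is normally ESTABLISHED; SYN_RECV,
         * CLOSE_WAIT and CLOSE can legitimately appear here too (e.g.
         * when the peer has already sent a FIN or reset the connection),
         * so the check below only warns on anything else.
         */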
        WARN_ON(!((1 << sk2->sk_state) &
                  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
                   TCPF_CLOSE_WAIT | TCPF_CLOSE)));

        sock_graft(sk2, newsock);

        newsock->state = SS_CONNECTED;
        err = 0;
        release_sock(sk2);
do_err:
        return err;
}
EXPORT_SYMBOL(inet_accept);


/*
 *      This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
                 int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

        sin->sin_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport ||
                    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
                     peer == 1))
                        return -ENOTCONN;
                sin->sin_port = inet->inet_dport;
                sin->sin_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;
                if (!addr)
                        addr = inet->inet_saddr;
                sin->sin_port = inet->inet_sport;
                sin->sin_addr.s_addr = addr;
        }
        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        *uaddr_len = sizeof(*sin);
        return 0;
}
EXPORT_SYMBOL(inet_getname);

int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                 size_t size)
{
        struct sock *sk = sock->sk;

        sock_rps_record_flow(sk);

        /* We may need to bind the socket. */
        if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
            inet_autobind(sk))
                return -EAGAIN;

        return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
                      size_t size, int flags)
{
        struct sock *sk = sock->sk;

        sock_rps_record_flow(sk);

        /* We may need to bind the socket. */
        if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
            inet_autobind(sk))
                return -EAGAIN;

        if (sk->sk_prot->sendpage)
                return sk->sk_prot->sendpage(sk, page, offset, size, flags);
        return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);

int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                 size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        sock_rps_record_flow(sk);

        err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(inet_recvmsg);

int inet_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        int err = 0;

        /* This should really check to make sure
         * the socket is a TCP socket. (WHY AC...)
         */
        how++;  /* maps SHUT_RD (0) -> RCV_SHUTDOWN (1),
                 * SHUT_WR (1) -> SEND_SHUTDOWN (2) and
                 * SHUT_RDWR (2) -> 3, i.e. both bits set.
                 */
        if ((how & ~SHUTDOWN_MASK) || !how)     /* MAXINT->0 */
                return -EINVAL;

        lock_sock(sk);
        if (sock->state == SS_CONNECTING) {
                if ((1 << sk->sk_state) &
                    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
                        sock->state = SS_DISCONNECTING;
                else
                        sock->state = SS_CONNECTED;
        }

        switch (sk->sk_state) {
        case TCP_CLOSE:
                err = -ENOTCONN;
                /* Hack to wake up other listeners, who can poll for
                   POLLHUP, even on eg. unconnected UDP sockets -- RR */
        default:
                sk->sk_shutdown |= how;
                if (sk->sk_prot->shutdown)
                        sk->sk_prot->shutdown(sk, how);
                break;

        /* Remaining two branches are temporary solution for missing
         * close() in multithreaded environment. It is _not_ a good idea,
         * but we have no choice until close() is repaired at VFS level.
         */
        case TCP_LISTEN:
                if (!(how & RCV_SHUTDOWN))
                        break;
                /* Fall through */
        case TCP_SYN_SENT:
                err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
                sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
                break;
        }

        /* Wake up anyone sleeping in poll. */
        sk->sk_state_change(sk);
        release_sock(sk);
        return err;
}
EXPORT_SYMBOL(inet_shutdown);

/*
 *      ioctl() calls you can issue on an INET socket. Most of these are
 *      device configuration and stuff and very rarely used. Some ioctls
 *      pass on to the socket itself.
 *
 *      NOTE: I like the idea of a module for the config stuff. i.e.
 *      ifconfig loads the devconfigure module, which does its configuring
 *      and then unloads. There's a good 20K of config code hanging
 *      around the kernel.
 */

int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;
        int err = 0;
        struct net *net = sock_net(sk);

        switch (cmd) {
        case SIOCGSTAMP:
                err = sock_get_timestamp(sk, (struct timeval __user *)arg);
                break;
        case SIOCGSTAMPNS:
                err = sock_get_timestampns(sk, (struct timespec __user *)arg);
                break;
        case SIOCADDRT:
        case SIOCDELRT:
        case SIOCRTMSG:
                err = ip_rt_ioctl(net, cmd, (void __user *)arg);
                break;
        case SIOCDARP:
        case SIOCGARP:
        case SIOCSARP:
                err = arp_ioctl(net, cmd, (void __user *)arg);
                break;
        case SIOCGIFADDR:
        case SIOCSIFADDR:
        case SIOCGIFBRDADDR:
        case SIOCSIFBRDADDR:
        case SIOCGIFNETMASK:
        case SIOCSIFNETMASK:
        case SIOCGIFDSTADDR:
        case SIOCSIFDSTADDR:
        case SIOCSIFPFLAGS:
        case SIOCGIFPFLAGS:
        case SIOCSIFFLAGS:
                err = devinet_ioctl(net, cmd, (void __user *)arg);
                break;
        default:
                if (sk->sk_prot->ioctl)
                        err = sk->sk_prot->ioctl(sk, cmd, arg);
                else
                        err = -ENOIOCTLCMD;
                break;
        }
        return err;
}
EXPORT_SYMBOL(inet_ioctl);

#ifdef CONFIG_COMPAT
static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;
        int err = -ENOIOCTLCMD;

        if (sk->sk_prot->compat_ioctl)
                err = sk->sk_prot->compat_ioctl(sk, cmd, arg);

        return err;
}
#endif

const struct proto_ops inet_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = inet_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,
        .poll              = tcp_poll,
        .ioctl             = inet_ioctl,
        .listen            = inet_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = inet_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
        .splice_read       = tcp_splice_read,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);

const struct proto_ops inet_dgram_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = inet_bind,
        .connect           = inet_dgram_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
        .poll              = udp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = inet_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);

/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = inet_bind,
        .connect           = inet_dgram_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
        .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = inet_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
#endif
};

static const struct net_proto_family inet_family_ops = {
        .family = PF_INET,
        .create = inet_create,
        .owner  = THIS_MODULE,
};

/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
        {
                .type =       SOCK_STREAM,
                .protocol =   IPPROTO_TCP,
                .prot =       &tcp_prot,
                .ops =        &inet_stream_ops,
                .no_check =   0,
                .flags =      INET_PROTOSW_PERMANENT |
                              INET_PROTOSW_ICSK,
        },

        {
                .type =       SOCK_DGRAM,
                .protocol =   IPPROTO_UDP,
                .prot =       &udp_prot,
                .ops =        &inet_dgram_ops,
                .no_check =   UDP_CSUM_DEFAULT,
                .flags =      INET_PROTOSW_PERMANENT,
        },

        {
                .type =       SOCK_DGRAM,
                .protocol =   IPPROTO_ICMP,
                .prot =       &ping_prot,
                .ops =        &inet_dgram_ops,
                .no_check =   UDP_CSUM_DEFAULT,
                .flags =      INET_PROTOSW_REUSE,
        },

        {
                .type =       SOCK_RAW,
                .protocol =   IPPROTO_IP,        /* wild card */
                .prot =       &raw_prot,
                .ops =        &inet_sockraw_ops,
                .no_check =   UDP_CSUM_DEFAULT,
                .flags =      INET_PROTOSW_REUSE,
        }
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)

void inet_register_protosw(struct inet_protosw *p)
{
        struct list_head *lh;
        struct inet_protosw *answer;
        int protocol = p->protocol;
        struct list_head *last_perm;

        spin_lock_bh(&inetsw_lock);

        if (p->type >= SOCK_MAX)
                goto out_illegal;

        /* If we are trying to override a permanent protocol, bail. */
        answer = NULL;
        last_perm = &inetsw[p->type];
        list_for_each(lh, &inetsw[p->type]) {
                answer = list_entry(lh, struct inet_protosw, list);

                /* Check only the non-wild match. */
                if (INET_PROTOSW_PERMANENT & answer->flags) {
                        if (protocol == answer->protocol)
                                break;
                        last_perm = lh;
                }

                answer = NULL;
        }
        if (answer)
                goto out_permanent;

        /* Add the new entry after the last permanent entry if any, so that
         * the new entry does not override a permanent entry when matched with
         * a wild-card protocol. But it is allowed to override any existing
         * non-permanent entry. This means that when we remove this entry, the
         * system automatically returns to the old behavior.
         */
        list_add_rcu(&p->list, last_perm);
out:
        spin_unlock_bh(&inetsw_lock);

        return;

out_permanent:
        pr_err("Attempt to override permanent protocol %d\n", protocol);
        goto out;

out_illegal:
        pr_err("Ignoring attempt to register invalid socket type %d\n",
               p->type);
        goto out;
}
EXPORT_SYMBOL(inet_register_protosw);

void inet_unregister_protosw(struct inet_protosw *p)
{
        if (INET_PROTOSW_PERMANENT & p->flags) {
                pr_err("Attempt to unregister permanent protocol %d\n",
                       p->protocol);
        } else {
                spin_lock_bh(&inetsw_lock);
                list_del_rcu(&p->list);
                spin_unlock_bh(&inetsw_lock);

                synchronize_net();
        }
}
EXPORT_SYMBOL(inet_unregister_protosw);

/*
 *      Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr __read_mostly;
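/* As used below: zero disables source-address reselection entirely, any
 * nonzero value enables it for unbound sockets in SYN_SENT, and values
 * greater than 1 additionally log each address change.
 */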

static int inet_sk_reselect_saddr(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        __be32 old_saddr = inet->inet_saddr;
        __be32 daddr = inet->inet_daddr;
        struct flowi4 *fl4;
        struct rtable *rt;
        __be32 new_saddr;
        struct ip_options_rcu *inet_opt;

        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             sock_owned_by_user(sk));
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;

        /* Query new route. */
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
                              sk->sk_bound_dev_if, sk->sk_protocol,
                              inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        sk_setup_caps(sk, &rt->dst);

        new_saddr = fl4->saddr;

        if (new_saddr == old_saddr)
                return 0;

        if (sysctl_ip_dynaddr > 1) {
                pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
                        __func__, &old_saddr, &new_saddr);
        }

        inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;

        /*
         * XXX The only one ugly spot where we need to
         * XXX really change the sockets identity after
         * XXX it has entered the hashes. -DaveM
         *
         * Besides that, it does not check for connection
         * uniqueness. Wait for troubles.
         */
        __sk_prot_rehash(sk);
        return 0;
}

int inet_sk_rebuild_header(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
        __be32 daddr;
        struct ip_options_rcu *inet_opt;
        struct flowi4 *fl4;
        int err;

        /* Route is OK, nothing to do. */
        if (rt)
                return 0;

        /* Reroute. */
        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        daddr = inet->inet_daddr;
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        rcu_read_unlock();
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
                                   inet->inet_dport, inet->inet_sport,
                                   sk->sk_protocol, RT_CONN_FLAGS(sk),
                                   sk->sk_bound_dev_if);
        if (!IS_ERR(rt)) {
                err = 0;
                sk_setup_caps(sk, &rt->dst);
        } else {
                err = PTR_ERR(rt);

                /* Routing failed... */
                sk->sk_route_caps = 0;
                /*
                 * Other protocols have to map their equivalent state to
                 * TCP_SYN_SENT.  DCCP maps its DCCP_REQUESTING state to
                 * TCP_SYN_SENT. -acme
                 */
                if (!sysctl_ip_dynaddr ||
                    sk->sk_state != TCP_SYN_SENT ||
                    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
                    (err = inet_sk_reselect_saddr(sk)) != 0)
                        sk->sk_err_soft = -err;
        }

        return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);

static int inet_gso_send_check(struct sk_buff *skb)
{
        const struct net_offload *ops;
        const struct iphdr *iph;
        int proto;
        int ihl;
        int err = -EINVAL;

        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;
        if (ihl < sizeof(*iph))
                goto out;

        proto = iph->protocol;

        /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
        __skb_pull(skb, ihl);

        skb_reset_transport_header(skb);
        err = -EPROTONOSUPPORT;

        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_send_check))
                err = ops->callbacks.gso_send_check(skb);

out:
        return err;
}

static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        unsigned int offset = 0;
        bool udpfrag, encap;
        struct iphdr *iph;
        int proto;
        int nhoff;
        int ihl;
        int id;

        if (unlikely(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_TCPV4 |
                       SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
                       SKB_GSO_IPIP |
                       SKB_GSO_SIT |
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_MPLS |
                       0)))
                goto out;

        skb_reset_network_header(skb);
        nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;
        if (ihl < sizeof(*iph))
                goto out;

        id = ntohs(iph->id);
        proto = iph->protocol;

        /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
        __skb_pull(skb, ihl);

        encap = SKB_GSO_CB(skb)->encap_level > 0;
        if (encap)
                features = skb->dev->hw_enc_features & netif_skb_features(skb);
        SKB_GSO_CB(skb)->encap_level += ihl;

        skb_reset_transport_header(skb);

        segs = ERR_PTR(-EPROTONOSUPPORT);

        if (skb->encapsulation &&
            skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
                udpfrag = proto == IPPROTO_UDP && encap;
        else
                udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        if (IS_ERR_OR_NULL(segs))
                goto out;

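        /* Walk the segment list and fix up each outer IP header: for UDP
         * fragmentation all pieces keep the same IP ID and advance the
         * fragment offset (IP_MF set on all but the last), while for
         * everything else each segment gets an incrementing IP ID.
         * tot_len and the header checksum are recomputed either way.
         */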
        skb = segs;
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
                if (udpfrag) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
                        if (skb->next != NULL)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
                } else {
                        iph->id = htons(id++);
                }
                iph->tot_len = htons(skb->len - nhoff);
                ip_send_check(iph);
                if (encap)
                        skb_reset_inner_headers(skb);
                skb->network_header = (u8 *)iph - skb->head;
        } while ((skb = skb->next));

out:
        return segs;
}

static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        const struct iphdr *iph;
        unsigned int hlen;
        unsigned int off;
        unsigned int id;
        int flush = 1;
        int proto;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
        iph = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                iph = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!iph))
                        goto out;
        }

        proto = iph->protocol;

        rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        if (*(u8 *)iph != 0x45)
                goto out_unlock;

        if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;

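        /* The 32 bits at &iph->id cover both the IP ID (the upper half
         * after ntohl) and the fragment-offset word. flush therefore ends
         * up nonzero if tot_len disagrees with the GRO length or if any
         * fragmentation bits other than DF are set; id is then shifted
         * down so that only the IP ID remains.
         */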
        id = ntohl(*(__be32 *)&iph->id);
        flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
        id >>= 16;

        for (p = *head; p; p = p->next) {
                struct iphdr *iph2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                iph2 = (struct iphdr *)(p->data + off);
                /* The above works because, with the exception of the top
                 * (inner most) layer, we only aggregate pkts with the same
                 * hdr length so all the hdrs we'll need to verify will start
                 * at the same offset.
                 */
                if ((iph->protocol ^ iph2->protocol) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
                        (iph->tos ^ iph2->tos) |
                        ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));

                /* Save the IP ID check to be included later when we get to
                 * the transport layer so only the inner most IP ID is checked.
                 * This is because some GSO/TSO implementations do not
                 * correctly increment the IP ID for the outer hdrs.
                 */
                NAPI_GRO_CB(p)->flush_id =
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
                NAPI_GRO_CB(p)->flush |= flush;
        }

        NAPI_GRO_CB(skb)->flush |= flush;
        skb_set_network_header(skb, off);
        /* The above will be needed by the transport layer if there is one
         * immediately following this IP hdr.
         */

        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}

static int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
        __be16 newlen = htons(skb->len - nhoff);
        struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        int proto = iph->protocol;
        int err = -ENOSYS;

        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;

        rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        /* Only need to add sizeof(*iph) to get to the next hdr below
         * because any hdr with option will have been flushed in
         * inet_gro_receive().
         */
        err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));

out_unlock:
        rcu_read_unlock();

        return err;
}

int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                         unsigned short type, unsigned char protocol,
                         struct net *net)
{
        struct socket *sock;
        int rc = sock_create_kern(family, type, protocol, &sock);

        if (rc == 0) {
                *sk = sock->sk;
                (*sk)->sk_allocation = GFP_ATOMIC;
                /*
                 * Unhash it so that IP input processing does not even see it,
                 * we do not wish this socket to see incoming packets.
                 */
                (*sk)->sk_prot->unhash(*sk);

                sk_change_net(*sk, net);
        }
        return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);

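/* Sum one counter across every possible CPU's per-cpu MIB block; offt is
 * the counter's offset in units of unsigned long, as the pointer
 * arithmetic below shows.
 */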
unsigned long snmp_fold_field(void __percpu *mib[], int offt)
{
        unsigned long res = 0;
        int i, j;

        for_each_possible_cpu(i) {
                for (j = 0; j < SNMP_ARRAY_SZ; j++)
                        res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
        }
        return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);

#if BITS_PER_LONG==32

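/* 64-bit counters cannot be read atomically on 32-bit hosts, so each
 * per-cpu value is sampled under its u64_stats seqcount and re-read if a
 * writer raced with us, then folded into the sum.
 */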
u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
{
        u64 res = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                void *bhptr;
                struct u64_stats_sync *syncp;
                u64 v;
                unsigned int start;

                bhptr = per_cpu_ptr(mib[0], cpu);
                syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
                do {
                        start = u64_stats_fetch_begin_irq(syncp);
                        v = *(((u64 *) bhptr) + offt);
                } while (u64_stats_fetch_retry_irq(syncp, start));

                res += v;
        }
        return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif

int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
        BUG_ON(ptr == NULL);
        ptr[0] = __alloc_percpu(mibsize, align);
        if (!ptr[0])
                return -ENOMEM;

#if SNMP_ARRAY_SZ == 2
        ptr[1] = __alloc_percpu(mibsize, align);
        if (!ptr[1]) {
                free_percpu(ptr[0]);
                ptr[0] = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}
EXPORT_SYMBOL_GPL(snmp_mib_init);

#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
        .handler =      igmp_rcv,
        .netns_ok =     1,
};
#endif

static const struct net_protocol tcp_protocol = {
        .early_demux =  tcp_v4_early_demux,
        .handler =      tcp_v4_rcv,
        .err_handler =  tcp_v4_err,
        .no_policy =    1,
        .netns_ok =     1,
        .icmp_strict_tag_validation = 1,
};

static const struct net_protocol udp_protocol = {
        .early_demux =  udp_v4_early_demux,
        .handler =      udp_rcv,
        .err_handler =  udp_err,
        .no_policy =    1,
        .netns_ok =     1,
};

static const struct net_protocol icmp_protocol = {
        .handler =      icmp_rcv,
        .err_handler =  icmp_err,
        .no_policy =    1,
        .netns_ok =     1,
};

static __net_init int ipv4_mib_init_net(struct net *net)
{
        int i;

        if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
                          sizeof(struct tcp_mib),
                          __alignof__(struct tcp_mib)) < 0)
                goto err_tcp_mib;
        if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
                          sizeof(struct ipstats_mib),
                          __alignof__(struct ipstats_mib)) < 0)
                goto err_ip_mib;

        for_each_possible_cpu(i) {
                struct ipstats_mib *af_inet_stats;
                af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
                u64_stats_init(&af_inet_stats->syncp);
#if SNMP_ARRAY_SZ == 2
                af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
                u64_stats_init(&af_inet_stats->syncp);
#endif
        }

        if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
                          sizeof(struct linux_mib),
                          __alignof__(struct linux_mib)) < 0)
                goto err_net_mib;
        if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
                          sizeof(struct udp_mib),
                          __alignof__(struct udp_mib)) < 0)
                goto err_udp_mib;
        if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
                          sizeof(struct udp_mib),
                          __alignof__(struct udp_mib)) < 0)
                goto err_udplite_mib;
        if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
                          sizeof(struct icmp_mib),
                          __alignof__(struct icmp_mib)) < 0)
                goto err_icmp_mib;
        net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
                                              GFP_KERNEL);
        if (!net->mib.icmpmsg_statistics)
                goto err_icmpmsg_mib;

        tcp_mib_init(net);
        return 0;

err_icmpmsg_mib:
        snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
err_icmp_mib:
        snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
err_udplite_mib:
        snmp_mib_free((void __percpu **)net->mib.udp_statistics);
err_udp_mib:
        snmp_mib_free((void __percpu **)net->mib.net_statistics);
err_net_mib:
        snmp_mib_free((void __percpu **)net->mib.ip_statistics);
err_ip_mib:
        snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
err_tcp_mib:
        return -ENOMEM;
}

static __net_exit void ipv4_mib_exit_net(struct net *net)
{
        kfree(net->mib.icmpmsg_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
        snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
        snmp_mib_free((void __percpu **)net->mib.udp_statistics);
        snmp_mib_free((void __percpu **)net->mib.net_statistics);
        snmp_mib_free((void __percpu **)net->mib.ip_statistics);
        snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
        .init = ipv4_mib_init_net,
        .exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
        return register_pernet_subsys(&ipv4_mib_ops);
}

static __net_init int inet_init_net(struct net *net)
{
        /*
         * Set defaults for local port range
         */
        seqlock_init(&net->ipv4.ip_local_ports.lock);
        net->ipv4.ip_local_ports.range[0] = 32768;
        net->ipv4.ip_local_ports.range[1] = 61000;

        seqlock_init(&net->ipv4.ping_group_range.lock);
        /*
         * Sane defaults - nobody may create ping sockets.
         * Boot scripts should set this to distro-specific group.
         */
        net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
        return 0;
}

static __net_exit void inet_exit_net(struct net *net)
{
}

static __net_initdata struct pernet_operations af_inet_ops = {
        .init = inet_init_net,
        .exit = inet_exit_net,
};

static int __init init_inet_pernet_ops(void)
{
        return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *      IP protocol layer initialiser
 */

static struct packet_offload ip_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .callbacks = {
                .gso_send_check = inet_gso_send_check,
                .gso_segment = inet_gso_segment,
                .gro_receive = inet_gro_receive,
                .gro_complete = inet_gro_complete,
        },
};

static const struct net_offload ipip_offload = {
        .callbacks = {
                .gso_send_check = inet_gso_send_check,
                .gso_segment = inet_gso_segment,
        },
};

static int __init ipv4_offload_init(void)
{
        /*
         * Add offloads
         */
        if (udpv4_offload_init() < 0)
                pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
        if (tcpv4_offload_init() < 0)
                pr_crit("%s: Cannot add TCP protocol offload\n", __func__);

        dev_add_offload(&ip_packet_offload);
        inet_add_offload(&ipip_offload, IPPROTO_IPIP);
        return 0;
}

fs_initcall(ipv4_offload_init);

static struct packet_type ip_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = ip_rcv,
};

static int __init inet_init(void)
{
        struct inet_protosw *q;
        struct list_head *r;
        int rc = -EINVAL;

        BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));

        sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
        if (!sysctl_local_reserved_ports)
                goto out;

        rc = proto_register(&tcp_prot, 1);
        if (rc)
                goto out_free_reserved_ports;

        rc = proto_register(&udp_prot, 1);
        if (rc)
                goto out_unregister_tcp_proto;

        rc = proto_register(&raw_prot, 1);
        if (rc)
                goto out_unregister_udp_proto;

        rc = proto_register(&ping_prot, 1);
        if (rc)
                goto out_unregister_raw_proto;

        /*
         *      Tell SOCKET that we are alive...
         */

        (void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
        ip_static_sysctl_init();
#endif

        /*
         *      Add all the base protocols.
         */

        if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
                pr_crit("%s: Cannot add ICMP protocol\n", __func__);
        if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
                pr_crit("%s: Cannot add UDP protocol\n", __func__);
        if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
                pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
        if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
                pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

        /* Register the socket-side information for inet_create. */
        for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
                INIT_LIST_HEAD(r);

        for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
                inet_register_protosw(q);

        /*
         *      Set the ARP module up
         */

        arp_init();

        /*
         *      Set the IP module up
         */

        ip_init();

        tcp_v4_init();

        /* Setup TCP slab cache for open requests. */
        tcp_init();

        /* Setup UDP memory threshold */
        udp_init();

        /* Add UDP-Lite (RFC 3828) */
        udplite4_register();

        ping_init();

        /*
         *      Set the ICMP layer up
         */

        if (icmp_init() < 0)
                panic("Failed to create the ICMP control socket.\n");

        /*
         *      Initialise the multicast router
         */
#if defined(CONFIG_IP_MROUTE)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

        if (init_inet_pernet_ops())
                pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */

        if (init_ipv4_mibs())
                pr_crit("%s: Cannot init ipv4 mibs\n", __func__);

        ipv4_proc_init();

        ipfrag_init();

        dev_add_pack(&ip_packet_type);

        rc = 0;
out:
        return rc;
out_unregister_raw_proto:
        proto_unregister(&raw_prot);
out_unregister_udp_proto:
        proto_unregister(&udp_prot);
out_unregister_tcp_proto:
        proto_unregister(&tcp_prot);
out_free_reserved_ports:
        kfree(sysctl_local_reserved_ports);
        goto out;
}

fs_initcall(inet_init);

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
        int rc = 0;

        if (raw_proc_init())
                goto out_raw;
        if (tcp4_proc_init())
                goto out_tcp;
        if (udp4_proc_init())
                goto out_udp;
        if (ping_proc_init())
                goto out_ping;
        if (ip_misc_proc_init())
                goto out_misc;
out:
        return rc;
out_misc:
        ping_proc_exit();
out_ping:
        udp4_proc_exit();
out_udp:
        tcp4_proc_exit();
out_tcp:
        raw_proc_exit();
out_raw:
        rc = -ENOMEM;
        goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
        return 0;
}
#endif /* CONFIG_PROC_FS */

MODULE_ALIAS_NETPROTO(PF_INET);
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PF_INET protocol family socket handler.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Florian La Roche, <flla@stud.uni-sb.de>
11 * Alan Cox, <A.Cox@swansea.ac.uk>
12 *
13 * Changes (see also sock.c)
14 *
15 * piggy,
16 * Karl Knutson : Socket protocol table
17 * A.N.Kuznetsov : Socket death error in accept().
18 * John Richardson : Fix non blocking error in connect()
19 * so sockets that fail to connect
20 * don't return -EINPROGRESS.
21 * Alan Cox : Asynchronous I/O support
22 * Alan Cox : Keep correct socket pointer on sock
23 * structures
24 * when accept() ed
25 * Alan Cox : Semantics of SO_LINGER aren't state
26 * moved to close when you look carefully.
27 * With this fixed and the accept bug fixed
28 * some RPC stuff seems happier.
29 * Niibe Yutaka : 4.4BSD style write async I/O
30 * Alan Cox,
31 * Tony Gale : Fixed reuse semantics.
32 * Alan Cox : bind() shouldn't abort existing but dead
33 * sockets. Stops FTP netin:.. I hope.
34 * Alan Cox : bind() works correctly for RAW sockets.
35 * Note that FreeBSD at least was broken
36 * in this respect so be careful with
37 * compatibility tests...
38 * Alan Cox : routing cache support
39 * Alan Cox : memzero the socket structure for
40 * compactness.
41 * Matt Day : nonblock connect error handler
42 * Alan Cox : Allow large numbers of pending sockets
43 * (eg for big web sites), but only if
44 * specifically application requested.
45 * Alan Cox : New buffering throughout IP. Used
46 * dumbly.
47 * Alan Cox : New buffering now used smartly.
48 * Alan Cox : BSD rather than common sense
49 * interpretation of listen.
50 * Germano Caronni : Assorted small races.
51 * Alan Cox : sendmsg/recvmsg basic support.
52 * Alan Cox : Only sendmsg/recvmsg now supported.
53 * Alan Cox : Locked down bind (see security list).
54 * Alan Cox : Loosened bind a little.
55 * Mike McLagan : ADD/DEL DLCI Ioctls
56 * Willy Konynenberg : Transparent proxying support.
57 * David S. Miller : New socket lookup architecture.
58 * Some other random speedups.
59 * Cyrus Durgin : Cleaned up file for kmod hacks.
60 * Andi Kleen : Fix inet_stream_connect TCP race.
61 *
62 * This program is free software; you can redistribute it and/or
63 * modify it under the terms of the GNU General Public License
64 * as published by the Free Software Foundation; either version
65 * 2 of the License, or (at your option) any later version.
66 */
67
68#define pr_fmt(fmt) "IPv4: " fmt
69
70#include <linux/err.h>
71#include <linux/errno.h>
72#include <linux/types.h>
73#include <linux/socket.h>
74#include <linux/in.h>
75#include <linux/kernel.h>
76#include <linux/kmod.h>
77#include <linux/sched.h>
78#include <linux/timer.h>
79#include <linux/string.h>
80#include <linux/sockios.h>
81#include <linux/net.h>
82#include <linux/capability.h>
83#include <linux/fcntl.h>
84#include <linux/mm.h>
85#include <linux/interrupt.h>
86#include <linux/stat.h>
87#include <linux/init.h>
88#include <linux/poll.h>
89#include <linux/netfilter_ipv4.h>
90#include <linux/random.h>
91#include <linux/slab.h>
92
93#include <linux/uaccess.h>
94
95#include <linux/inet.h>
96#include <linux/igmp.h>
97#include <linux/inetdevice.h>
98#include <linux/netdevice.h>
99#include <net/checksum.h>
100#include <net/ip.h>
101#include <net/protocol.h>
102#include <net/arp.h>
103#include <net/route.h>
104#include <net/ip_fib.h>
105#include <net/inet_connection_sock.h>
106#include <net/tcp.h>
107#include <net/udp.h>
108#include <net/udplite.h>
109#include <net/ping.h>
110#include <linux/skbuff.h>
111#include <net/sock.h>
112#include <net/raw.h>
113#include <net/icmp.h>
114#include <net/inet_common.h>
115#include <net/ip_tunnels.h>
116#include <net/xfrm.h>
117#include <net/net_namespace.h>
118#include <net/secure_seq.h>
119#ifdef CONFIG_IP_MROUTE
120#include <linux/mroute.h>
121#endif
122#include <net/l3mdev.h>
123
124#include <trace/events/sock.h>
125
126/* The inetsw table contains everything that inet_create needs to
127 * build a new socket.
128 */
129static struct list_head inetsw[SOCK_MAX];
130static DEFINE_SPINLOCK(inetsw_lock);
131
132/* New destruction routine */
133
134void inet_sock_destruct(struct sock *sk)
135{
136 struct inet_sock *inet = inet_sk(sk);
137
138 __skb_queue_purge(&sk->sk_receive_queue);
139 __skb_queue_purge(&sk->sk_error_queue);
140
141 sk_mem_reclaim(sk);
142
143 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
144 pr_err("Attempt to release TCP socket in state %d %p\n",
145 sk->sk_state, sk);
146 return;
147 }
148 if (!sock_flag(sk, SOCK_DEAD)) {
149 pr_err("Attempt to release alive inet socket %p\n", sk);
150 return;
151 }
152
153 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
154 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
155 WARN_ON(sk->sk_wmem_queued);
156 WARN_ON(sk->sk_forward_alloc);
157
158 kfree(rcu_dereference_protected(inet->inet_opt, 1));
159 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
160 dst_release(sk->sk_rx_dst);
161 sk_refcnt_debug_dec(sk);
162}
163EXPORT_SYMBOL(inet_sock_destruct);
164
165/*
166 * The routines beyond this point handle the behaviour of an AF_INET
167 * socket object. Mostly it punts to the subprotocols of IP to do
168 * the work.
169 */
170
171/*
172 * Automatically bind an unbound socket.
173 */
174
175static int inet_autobind(struct sock *sk)
176{
177 struct inet_sock *inet;
178 /* We may need to bind the socket. */
179 lock_sock(sk);
180 inet = inet_sk(sk);
181 if (!inet->inet_num) {
182 if (sk->sk_prot->get_port(sk, 0)) {
183 release_sock(sk);
184 return -EAGAIN;
185 }
186 inet->inet_sport = htons(inet->inet_num);
187 }
188 release_sock(sk);
189 return 0;
190}
191
192/*
193 * Move a socket into listening state.
194 */
195int inet_listen(struct socket *sock, int backlog)
196{
197 struct sock *sk = sock->sk;
198 unsigned char old_state;
199 int err, tcp_fastopen;
200
201 lock_sock(sk);
202
203 err = -EINVAL;
204 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
205 goto out;
206
207 old_state = sk->sk_state;
208 if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
209 goto out;
210
211 /* Really, if the socket is already in listen state
212 * we can only allow the backlog to be adjusted.
213 */
214 if (old_state != TCP_LISTEN) {
215 /* Enable TFO w/o requiring TCP_FASTOPEN socket option.
216 * Note that only TCP sockets (SOCK_STREAM) will reach here.
217	 * Also the fastopen backlog may already have been set via the option
218 * because the socket was in TCP_LISTEN state previously but
219 * was shutdown() rather than close().
220 */
221 tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
222 if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
223 (tcp_fastopen & TFO_SERVER_ENABLE) &&
224 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
225 fastopen_queue_tune(sk, backlog);
226 tcp_fastopen_init_key_once(sock_net(sk));
227 }
228
229 err = inet_csk_listen_start(sk, backlog);
230 if (err)
231 goto out;
232 }
233 sk->sk_max_ack_backlog = backlog;
234 err = 0;
235
236out:
237 release_sock(sk);
238 return err;
239}
240EXPORT_SYMBOL(inet_listen);
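
/* For illustration only, a sketch of the userspace side of the TFO
 * handling above, assuming a hypothetical bound SOCK_STREAM fd: with
 * both the TFO_SERVER_ENABLE and TFO_SERVER_WO_SOCKOPT1 bits set in
 * the net.ipv4.tcp_fastopen sysctl, a plain listen() arms fastopen;
 * otherwise the socket option is needed first:
 *
 *	int qlen = 16;		// max pending fastopen requests
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, 128);	// backlog as usual
 */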
241
242/*
243 * Create an inet socket.
244 */
245
246static int inet_create(struct net *net, struct socket *sock, int protocol,
247 int kern)
248{
249 struct sock *sk;
250 struct inet_protosw *answer;
251 struct inet_sock *inet;
252 struct proto *answer_prot;
253 unsigned char answer_flags;
254 int try_loading_module = 0;
255 int err;
256
257 if (protocol < 0 || protocol >= IPPROTO_MAX)
258 return -EINVAL;
259
260 sock->state = SS_UNCONNECTED;
261
262 /* Look for the requested type/protocol pair. */
263lookup_protocol:
264 err = -ESOCKTNOSUPPORT;
265 rcu_read_lock();
266 list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
267
268 err = 0;
269 /* Check the non-wild match. */
270 if (protocol == answer->protocol) {
271 if (protocol != IPPROTO_IP)
272 break;
273 } else {
274 /* Check for the two wild cases. */
275 if (IPPROTO_IP == protocol) {
276 protocol = answer->protocol;
277 break;
278 }
279 if (IPPROTO_IP == answer->protocol)
280 break;
281 }
282 err = -EPROTONOSUPPORT;
283 }
284
285 if (unlikely(err)) {
286 if (try_loading_module < 2) {
287 rcu_read_unlock();
288 /*
289 * Be more specific, e.g. net-pf-2-proto-132-type-1
290 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
291 */
292 if (++try_loading_module == 1)
293 request_module("net-pf-%d-proto-%d-type-%d",
294 PF_INET, protocol, sock->type);
295 /*
296 * Fall back to generic, e.g. net-pf-2-proto-132
297 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
298 */
299 else
300 request_module("net-pf-%d-proto-%d",
301 PF_INET, protocol);
302 goto lookup_protocol;
303 } else
304 goto out_rcu_unlock;
305 }
306
307 err = -EPERM;
308 if (sock->type == SOCK_RAW && !kern &&
309 !ns_capable(net->user_ns, CAP_NET_RAW))
310 goto out_rcu_unlock;
311
312 sock->ops = answer->ops;
313 answer_prot = answer->prot;
314 answer_flags = answer->flags;
315 rcu_read_unlock();
316
317 WARN_ON(!answer_prot->slab);
318
319 err = -ENOBUFS;
320 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
321 if (!sk)
322 goto out;
323
324 err = 0;
325 if (INET_PROTOSW_REUSE & answer_flags)
326 sk->sk_reuse = SK_CAN_REUSE;
327
328 inet = inet_sk(sk);
329 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
330
331 inet->nodefrag = 0;
332
333 if (SOCK_RAW == sock->type) {
334 inet->inet_num = protocol;
335 if (IPPROTO_RAW == protocol)
336 inet->hdrincl = 1;
337 }
338
339 if (net->ipv4.sysctl_ip_no_pmtu_disc)
340 inet->pmtudisc = IP_PMTUDISC_DONT;
341 else
342 inet->pmtudisc = IP_PMTUDISC_WANT;
343
344 inet->inet_id = 0;
345
346 sock_init_data(sock, sk);
347
348 sk->sk_destruct = inet_sock_destruct;
349 sk->sk_protocol = protocol;
350 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
351
352 inet->uc_ttl = -1;
353 inet->mc_loop = 1;
354 inet->mc_ttl = 1;
355 inet->mc_all = 1;
356 inet->mc_index = 0;
357 inet->mc_list = NULL;
358 inet->rcv_tos = 0;
359
360 sk_refcnt_debug_inc(sk);
361
362 if (inet->inet_num) {
363		/* It assumes that any protocol which allows
364		 * the user to assign a number at socket
365		 * creation time automatically shares that
366		 * number with other sockets of the protocol.
367 */
368 inet->inet_sport = htons(inet->inet_num);
369 /* Add to protocol hash chains. */
370 err = sk->sk_prot->hash(sk);
371 if (err) {
372 sk_common_release(sk);
373 goto out;
374 }
375 }
376
377 if (sk->sk_prot->init) {
378 err = sk->sk_prot->init(sk);
379 if (err) {
380 sk_common_release(sk);
381 goto out;
382 }
383 }
384
385 if (!kern) {
386 err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
387 if (err) {
388 sk_common_release(sk);
389 goto out;
390 }
391 }
392out:
393 return err;
394out_rcu_unlock:
395 rcu_read_unlock();
396 goto out;
397}
398
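/* For illustration only: the request_module() names used in
 * inet_create() above are matched by aliases that protocol modules
 * declare, e.g. (sketch for IPPROTO_SCTP/SOCK_STREAM; the macro lives
 * in include/linux/net.h):
 *
 *	MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 132, 1);
 *	// expands to MODULE_ALIAS("net-pf-2-proto-132-type-1")
 */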
399
400/*
401 * The peer socket should always be NULL (or else). When we call this
402 * function we are destroying the object and from then on nobody
403 * should refer to it.
404 */
405int inet_release(struct socket *sock)
406{
407 struct sock *sk = sock->sk;
408
409 if (sk) {
410 long timeout;
411
412 /* Applications forget to leave groups before exiting */
413 ip_mc_drop_socket(sk);
414
415 /* If linger is set, we don't return until the close
416 * is complete. Otherwise we return immediately. The
417 * actually closing is done the same either way.
418 *
419 * If the close is due to the process exiting, we never
420	 * linger.
421 */
422 timeout = 0;
423 if (sock_flag(sk, SOCK_LINGER) &&
424 !(current->flags & PF_EXITING))
425 timeout = sk->sk_lingertime;
426 sock->sk = NULL;
427 sk->sk_prot->close(sk, timeout);
428 }
429 return 0;
430}
431EXPORT_SYMBOL(inet_release);
432
433int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
434{
435 struct sock *sk = sock->sk;
436 int err;
437
438 /* If the socket has its own bind function then use it. (RAW) */
439 if (sk->sk_prot->bind) {
440 return sk->sk_prot->bind(sk, uaddr, addr_len);
441 }
442 if (addr_len < sizeof(struct sockaddr_in))
443 return -EINVAL;
444
445 /* BPF prog is run before any checks are done so that if the prog
446 * changes context in a wrong way it will be caught.
447 */
448 err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
449 if (err)
450 return err;
451
452 return __inet_bind(sk, uaddr, addr_len, false, true);
453}
454EXPORT_SYMBOL(inet_bind);
455
456int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
457 bool force_bind_address_no_port, bool with_lock)
458{
459 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
460 struct inet_sock *inet = inet_sk(sk);
461 struct net *net = sock_net(sk);
462 unsigned short snum;
463 int chk_addr_ret;
464 u32 tb_id = RT_TABLE_LOCAL;
465 int err;
466
467 if (addr->sin_family != AF_INET) {
468 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
469 * only if s_addr is INADDR_ANY.
470 */
471 err = -EAFNOSUPPORT;
472 if (addr->sin_family != AF_UNSPEC ||
473 addr->sin_addr.s_addr != htonl(INADDR_ANY))
474 goto out;
475 }
476
477 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
478 chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
479
480	/* Not specified by any standard per se; however, it breaks too
481 * many applications when removed. It is unfortunate since
482 * allowing applications to make a non-local bind solves
483 * several problems with systems using dynamic addressing.
484	 * (i.e. your servers still start up even if your ISDN link
485 * is temporarily down)
486 */
487 err = -EADDRNOTAVAIL;
488 if (!net->ipv4.sysctl_ip_nonlocal_bind &&
489 !(inet->freebind || inet->transparent) &&
490 addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
491 chk_addr_ret != RTN_LOCAL &&
492 chk_addr_ret != RTN_MULTICAST &&
493 chk_addr_ret != RTN_BROADCAST)
494 goto out;
495
496 snum = ntohs(addr->sin_port);
497 err = -EACCES;
498 if (snum && snum < inet_prot_sock(net) &&
499 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
500 goto out;
501
502 /* We keep a pair of addresses. rcv_saddr is the one
503 * used by hash lookups, and saddr is used for transmit.
504 *
505 * In the BSD API these are the same except where it
506 * would be illegal to use them (multicast/broadcast) in
507 * which case the sending device address is used.
508 */
509 if (with_lock)
510 lock_sock(sk);
511
512 /* Check these errors (active socket, double bind). */
513 err = -EINVAL;
514 if (sk->sk_state != TCP_CLOSE || inet->inet_num)
515 goto out_release_sock;
516
517 inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
518 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
519 inet->inet_saddr = 0; /* Use device */
520
521 /* Make sure we are allowed to bind here. */
522 if (snum || !(inet->bind_address_no_port ||
523 force_bind_address_no_port)) {
524 if (sk->sk_prot->get_port(sk, snum)) {
525 inet->inet_saddr = inet->inet_rcv_saddr = 0;
526 err = -EADDRINUSE;
527 goto out_release_sock;
528 }
529 err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
530 if (err) {
531 inet->inet_saddr = inet->inet_rcv_saddr = 0;
532 goto out_release_sock;
533 }
534 }
535
536 if (inet->inet_rcv_saddr)
537 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
538 if (snum)
539 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
540 inet->inet_sport = htons(inet->inet_num);
541 inet->inet_daddr = 0;
542 inet->inet_dport = 0;
543 sk_dst_reset(sk);
544 err = 0;
545out_release_sock:
546 if (with_lock)
547 release_sock(sk);
548out:
549 return err;
550}
551
552int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
553 int addr_len, int flags)
554{
555 struct sock *sk = sock->sk;
556 int err;
557
558 if (addr_len < sizeof(uaddr->sa_family))
559 return -EINVAL;
560 if (uaddr->sa_family == AF_UNSPEC)
561 return sk->sk_prot->disconnect(sk, flags);
562
563 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
564 err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
565 if (err)
566 return err;
567 }
568
569 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
570 return -EAGAIN;
571 return sk->sk_prot->connect(sk, uaddr, addr_len);
572}
573EXPORT_SYMBOL(inet_dgram_connect);
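
/* For illustration only: the AF_UNSPEC branch above is what lets
 * userspace dissolve a UDP association (sketch, fd assumed to be a
 * connected SOCK_DGRAM socket):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	// back to the unconnected state
 */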
574
575static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
576{
577 DEFINE_WAIT_FUNC(wait, woken_wake_function);
578
579 add_wait_queue(sk_sleep(sk), &wait);
580 sk->sk_write_pending += writebias;
581
582 /* Basic assumption: if someone sets sk->sk_err, he _must_
583	 * change the state of the socket from TCP_SYN_*.
584	 * Connect() does not allow one to get error notifications
585 * without closing the socket.
586 */
587 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
588 release_sock(sk);
589 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
590 lock_sock(sk);
591 if (signal_pending(current) || !timeo)
592 break;
593 }
594 remove_wait_queue(sk_sleep(sk), &wait);
595 sk->sk_write_pending -= writebias;
596 return timeo;
597}
598
599/*
600 * Connect to a remote host. There is regrettably still a little
601 * TCP 'magic' in here.
602 */
603int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
604 int addr_len, int flags, int is_sendmsg)
605{
606 struct sock *sk = sock->sk;
607 int err;
608 long timeo;
609
610 /*
611 * uaddr can be NULL and addr_len can be 0 if:
612 * sk is a TCP fastopen active socket and
613 * TCP_FASTOPEN_CONNECT sockopt is set and
614 * we already have a valid cookie for this socket.
615	 * In this case, the user can call write() after connect().
616 * write() will invoke tcp_sendmsg_fastopen() which calls
617 * __inet_stream_connect().
618 */
619 if (uaddr) {
620 if (addr_len < sizeof(uaddr->sa_family))
621 return -EINVAL;
622
623 if (uaddr->sa_family == AF_UNSPEC) {
624 err = sk->sk_prot->disconnect(sk, flags);
625 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
626 goto out;
627 }
628 }
629
630 switch (sock->state) {
631 default:
632 err = -EINVAL;
633 goto out;
634 case SS_CONNECTED:
635 err = -EISCONN;
636 goto out;
637 case SS_CONNECTING:
638 if (inet_sk(sk)->defer_connect)
639 err = is_sendmsg ? -EINPROGRESS : -EISCONN;
640 else
641 err = -EALREADY;
642 /* Fall out of switch with err, set for this state */
643 break;
644 case SS_UNCONNECTED:
645 err = -EISCONN;
646 if (sk->sk_state != TCP_CLOSE)
647 goto out;
648
649 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
650 err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
651 if (err)
652 goto out;
653 }
654
655 err = sk->sk_prot->connect(sk, uaddr, addr_len);
656 if (err < 0)
657 goto out;
658
659 sock->state = SS_CONNECTING;
660
661 if (!err && inet_sk(sk)->defer_connect)
662 goto out;
663
664 /* Just entered SS_CONNECTING state; the only
665		 * difference is that the return value in the non-blocking
666		 * case is EINPROGRESS rather than EALREADY.
667 */
668 err = -EINPROGRESS;
669 break;
670 }
671
672 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
673
674 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
675 int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
676 tcp_sk(sk)->fastopen_req &&
677 tcp_sk(sk)->fastopen_req->data ? 1 : 0;
678
679 /* Error code is set above */
680 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
681 goto out;
682
683 err = sock_intr_errno(timeo);
684 if (signal_pending(current))
685 goto out;
686 }
687
688	/* Connection was closed by RST, timeout or ICMP error,
689	 * or another process disconnected us.
690 */
691 if (sk->sk_state == TCP_CLOSE)
692 goto sock_error;
693
694	/* sk->sk_err may not be zero now if RECVERR was ordered by the user
695	 * and an error was received after the socket entered established state.
696	 * Hence, it is handled normally after connect() returns successfully.
697 */
698
699 sock->state = SS_CONNECTED;
700 err = 0;
701out:
702 return err;
703
704sock_error:
705 err = sock_error(sk) ? : -ECONNABORTED;
706 sock->state = SS_UNCONNECTED;
707 if (sk->sk_prot->disconnect(sk, flags))
708 sock->state = SS_DISCONNECTING;
709 goto out;
710}
711EXPORT_SYMBOL(__inet_stream_connect);
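
/* For illustration only: the -EINPROGRESS path above is what a
 * nonblocking userspace connect observes (sketch, hypothetical fd
 * with O_NONBLOCK set):
 *
 *	if (connect(fd, addr, alen) < 0 && errno == EINPROGRESS) {
 *		struct pollfd p = { .fd = fd, .events = POLLOUT };
 *		int e = 0;
 *		socklen_t l = sizeof(e);
 *
 *		poll(&p, 1, -1);	// wait for the handshake
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &l);
 *		// e == 0 on success, else the connect error
 *	}
 */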
712
713int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
714 int addr_len, int flags)
715{
716 int err;
717
718 lock_sock(sock->sk);
719 err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
720 release_sock(sock->sk);
721 return err;
722}
723EXPORT_SYMBOL(inet_stream_connect);
724
725/*
726 * Accept a pending connection. The TCP layer now gives BSD semantics.
727 */
728
729int inet_accept(struct socket *sock, struct socket *newsock, int flags,
730 bool kern)
731{
732 struct sock *sk1 = sock->sk;
733 int err = -EINVAL;
734 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
735
736 if (!sk2)
737 goto do_err;
738
739 lock_sock(sk2);
740
741 sock_rps_record_flow(sk2);
742 WARN_ON(!((1 << sk2->sk_state) &
743 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
744 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
745
746 sock_graft(sk2, newsock);
747
748 newsock->state = SS_CONNECTED;
749 err = 0;
750 release_sock(sk2);
751do_err:
752 return err;
753}
754EXPORT_SYMBOL(inet_accept);
755
756
757/*
758 * This does both peername and sockname.
759 */
760int inet_getname(struct socket *sock, struct sockaddr *uaddr,
761 int peer)
762{
763 struct sock *sk = sock->sk;
764 struct inet_sock *inet = inet_sk(sk);
765 DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
766
767 sin->sin_family = AF_INET;
768 if (peer) {
769 if (!inet->inet_dport ||
770 (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
771 peer == 1))
772 return -ENOTCONN;
773 sin->sin_port = inet->inet_dport;
774 sin->sin_addr.s_addr = inet->inet_daddr;
775 } else {
776 __be32 addr = inet->inet_rcv_saddr;
777 if (!addr)
778 addr = inet->inet_saddr;
779 sin->sin_port = inet->inet_sport;
780 sin->sin_addr.s_addr = addr;
781 }
782 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
783 return sizeof(*sin);
784}
785EXPORT_SYMBOL(inet_getname);
786
787int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
788{
789 struct sock *sk = sock->sk;
790
791 sock_rps_record_flow(sk);
792
793 /* We may need to bind the socket. */
794 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
795 inet_autobind(sk))
796 return -EAGAIN;
797
798 return sk->sk_prot->sendmsg(sk, msg, size);
799}
800EXPORT_SYMBOL(inet_sendmsg);
801
802ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
803 size_t size, int flags)
804{
805 struct sock *sk = sock->sk;
806
807 sock_rps_record_flow(sk);
808
809 /* We may need to bind the socket. */
810 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
811 inet_autobind(sk))
812 return -EAGAIN;
813
814 if (sk->sk_prot->sendpage)
815 return sk->sk_prot->sendpage(sk, page, offset, size, flags);
816 return sock_no_sendpage(sock, page, offset, size, flags);
817}
818EXPORT_SYMBOL(inet_sendpage);
819
820int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
821 int flags)
822{
823 struct sock *sk = sock->sk;
824 int addr_len = 0;
825 int err;
826
827 if (likely(!(flags & MSG_ERRQUEUE)))
828 sock_rps_record_flow(sk);
829
830 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
831 flags & ~MSG_DONTWAIT, &addr_len);
832 if (err >= 0)
833 msg->msg_namelen = addr_len;
834 return err;
835}
836EXPORT_SYMBOL(inet_recvmsg);
837
838int inet_shutdown(struct socket *sock, int how)
839{
840 struct sock *sk = sock->sk;
841 int err = 0;
842
843 /* This should really check to make sure
844 * the socket is a TCP socket. (WHY AC...)
845 */
846	how++; /* maps 0->1, 1->2, 2->3; this has the advantage of
847		  making bit 1 mean "shut down rcvs" and
848		  bit 2 mean "shut down snds" */
849 if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
850 return -EINVAL;
851
852 lock_sock(sk);
853 if (sock->state == SS_CONNECTING) {
854 if ((1 << sk->sk_state) &
855 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
856 sock->state = SS_DISCONNECTING;
857 else
858 sock->state = SS_CONNECTED;
859 }
860
861 switch (sk->sk_state) {
862 case TCP_CLOSE:
863 err = -ENOTCONN;
864 /* Hack to wake up other listeners, who can poll for
865		   EPOLLHUP, even on e.g. unconnected UDP sockets -- RR */
866 /* fall through */
867 default:
868 sk->sk_shutdown |= how;
869 if (sk->sk_prot->shutdown)
870 sk->sk_prot->shutdown(sk, how);
871 break;
872
873 /* Remaining two branches are temporary solution for missing
874 * close() in multithreaded environment. It is _not_ a good idea,
875 * but we have no choice until close() is repaired at VFS level.
876 */
877 case TCP_LISTEN:
878 if (!(how & RCV_SHUTDOWN))
879 break;
880 /* fall through */
881 case TCP_SYN_SENT:
882 err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
883 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
884 break;
885 }
886
887 /* Wake up anyone sleeping in poll. */
888 sk->sk_state_change(sk);
889 release_sock(sk);
890 return err;
891}
892EXPORT_SYMBOL(inet_shutdown);
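
/* For illustration only, the mapping performed by the how++ above,
 * using the constants from include/net/sock.h:
 *
 *	SHUT_RD   (0) -> 1 == RCV_SHUTDOWN
 *	SHUT_WR   (1) -> 2 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> 3 == RCV_SHUTDOWN | SEND_SHUTDOWN
 */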
893
894/*
895 * ioctl() calls you can issue on an INET socket. Most of these are
896 * device configuration and are very rarely used. Some ioctls
897 * pass on to the socket itself.
898 *
899 * NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
900 * loads the devconfigure module, lets it do its configuring and unloads it.
901 * There's a good 20K of config code hanging around the kernel.
902 */
903
904int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
905{
906 struct sock *sk = sock->sk;
907 int err = 0;
908 struct net *net = sock_net(sk);
909 void __user *p = (void __user *)arg;
910 struct ifreq ifr;
911 struct rtentry rt;
912
913 switch (cmd) {
914 case SIOCGSTAMP:
915 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
916 break;
917 case SIOCGSTAMPNS:
918 err = sock_get_timestampns(sk, (struct timespec __user *)arg);
919 break;
920 case SIOCADDRT:
921 case SIOCDELRT:
922 if (copy_from_user(&rt, p, sizeof(struct rtentry)))
923 return -EFAULT;
924 err = ip_rt_ioctl(net, cmd, &rt);
925 break;
926 case SIOCRTMSG:
927 err = -EINVAL;
928 break;
929 case SIOCDARP:
930 case SIOCGARP:
931 case SIOCSARP:
932 err = arp_ioctl(net, cmd, (void __user *)arg);
933 break;
934 case SIOCGIFADDR:
935 case SIOCGIFBRDADDR:
936 case SIOCGIFNETMASK:
937 case SIOCGIFDSTADDR:
938 case SIOCGIFPFLAGS:
939 if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
940 return -EFAULT;
941 err = devinet_ioctl(net, cmd, &ifr);
942 if (!err && copy_to_user(p, &ifr, sizeof(struct ifreq)))
943 err = -EFAULT;
944 break;
945
946 case SIOCSIFADDR:
947 case SIOCSIFBRDADDR:
948 case SIOCSIFNETMASK:
949 case SIOCSIFDSTADDR:
950 case SIOCSIFPFLAGS:
951 case SIOCSIFFLAGS:
952 if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
953 return -EFAULT;
954 err = devinet_ioctl(net, cmd, &ifr);
955 break;
956 default:
957 if (sk->sk_prot->ioctl)
958 err = sk->sk_prot->ioctl(sk, cmd, arg);
959 else
960 err = -ENOIOCTLCMD;
961 break;
962 }
963 return err;
964}
965EXPORT_SYMBOL(inet_ioctl);
966
967#ifdef CONFIG_COMPAT
968static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
969{
970 struct sock *sk = sock->sk;
971 int err = -ENOIOCTLCMD;
972
973 if (sk->sk_prot->compat_ioctl)
974 err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
975
976 return err;
977}
978#endif
979
980const struct proto_ops inet_stream_ops = {
981 .family = PF_INET,
982 .owner = THIS_MODULE,
983 .release = inet_release,
984 .bind = inet_bind,
985 .connect = inet_stream_connect,
986 .socketpair = sock_no_socketpair,
987 .accept = inet_accept,
988 .getname = inet_getname,
989 .poll = tcp_poll,
990 .ioctl = inet_ioctl,
991 .listen = inet_listen,
992 .shutdown = inet_shutdown,
993 .setsockopt = sock_common_setsockopt,
994 .getsockopt = sock_common_getsockopt,
995 .sendmsg = inet_sendmsg,
996 .recvmsg = inet_recvmsg,
997 .mmap = sock_no_mmap,
998 .sendpage = inet_sendpage,
999 .splice_read = tcp_splice_read,
1000 .read_sock = tcp_read_sock,
1001 .sendmsg_locked = tcp_sendmsg_locked,
1002 .sendpage_locked = tcp_sendpage_locked,
1003 .peek_len = tcp_peek_len,
1004#ifdef CONFIG_COMPAT
1005 .compat_setsockopt = compat_sock_common_setsockopt,
1006 .compat_getsockopt = compat_sock_common_getsockopt,
1007 .compat_ioctl = inet_compat_ioctl,
1008#endif
1009};
1010EXPORT_SYMBOL(inet_stream_ops);
1011
1012const struct proto_ops inet_dgram_ops = {
1013 .family = PF_INET,
1014 .owner = THIS_MODULE,
1015 .release = inet_release,
1016 .bind = inet_bind,
1017 .connect = inet_dgram_connect,
1018 .socketpair = sock_no_socketpair,
1019 .accept = sock_no_accept,
1020 .getname = inet_getname,
1021 .poll = udp_poll,
1022 .ioctl = inet_ioctl,
1023 .listen = sock_no_listen,
1024 .shutdown = inet_shutdown,
1025 .setsockopt = sock_common_setsockopt,
1026 .getsockopt = sock_common_getsockopt,
1027 .sendmsg = inet_sendmsg,
1028 .recvmsg = inet_recvmsg,
1029 .mmap = sock_no_mmap,
1030 .sendpage = inet_sendpage,
1031 .set_peek_off = sk_set_peek_off,
1032#ifdef CONFIG_COMPAT
1033 .compat_setsockopt = compat_sock_common_setsockopt,
1034 .compat_getsockopt = compat_sock_common_getsockopt,
1035 .compat_ioctl = inet_compat_ioctl,
1036#endif
1037};
1038EXPORT_SYMBOL(inet_dgram_ops);
1039
1040/*
1041 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
1042 * udp_poll
1043 */
1044static const struct proto_ops inet_sockraw_ops = {
1045 .family = PF_INET,
1046 .owner = THIS_MODULE,
1047 .release = inet_release,
1048 .bind = inet_bind,
1049 .connect = inet_dgram_connect,
1050 .socketpair = sock_no_socketpair,
1051 .accept = sock_no_accept,
1052 .getname = inet_getname,
1053 .poll = datagram_poll,
1054 .ioctl = inet_ioctl,
1055 .listen = sock_no_listen,
1056 .shutdown = inet_shutdown,
1057 .setsockopt = sock_common_setsockopt,
1058 .getsockopt = sock_common_getsockopt,
1059 .sendmsg = inet_sendmsg,
1060 .recvmsg = inet_recvmsg,
1061 .mmap = sock_no_mmap,
1062 .sendpage = inet_sendpage,
1063#ifdef CONFIG_COMPAT
1064 .compat_setsockopt = compat_sock_common_setsockopt,
1065 .compat_getsockopt = compat_sock_common_getsockopt,
1066 .compat_ioctl = inet_compat_ioctl,
1067#endif
1068};
1069
1070static const struct net_proto_family inet_family_ops = {
1071 .family = PF_INET,
1072 .create = inet_create,
1073 .owner = THIS_MODULE,
1074};
1075
1076/* Upon startup we insert all the elements in inetsw_array[] into
1077 * the linked list inetsw.
1078 */
1079static struct inet_protosw inetsw_array[] =
1080{
1081 {
1082 .type = SOCK_STREAM,
1083 .protocol = IPPROTO_TCP,
1084 .prot = &tcp_prot,
1085 .ops = &inet_stream_ops,
1086 .flags = INET_PROTOSW_PERMANENT |
1087 INET_PROTOSW_ICSK,
1088 },
1089
1090 {
1091 .type = SOCK_DGRAM,
1092 .protocol = IPPROTO_UDP,
1093 .prot = &udp_prot,
1094 .ops = &inet_dgram_ops,
1095 .flags = INET_PROTOSW_PERMANENT,
1096 },
1097
1098 {
1099 .type = SOCK_DGRAM,
1100 .protocol = IPPROTO_ICMP,
1101 .prot = &ping_prot,
1102 .ops = &inet_sockraw_ops,
1103 .flags = INET_PROTOSW_REUSE,
1104 },
1105
1106 {
1107 .type = SOCK_RAW,
1108 .protocol = IPPROTO_IP, /* wild card */
1109 .prot = &raw_prot,
1110 .ops = &inet_sockraw_ops,
1111 .flags = INET_PROTOSW_REUSE,
1112 }
1113};
1114
1115#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
1116
1117void inet_register_protosw(struct inet_protosw *p)
1118{
1119 struct list_head *lh;
1120 struct inet_protosw *answer;
1121 int protocol = p->protocol;
1122 struct list_head *last_perm;
1123
1124 spin_lock_bh(&inetsw_lock);
1125
1126 if (p->type >= SOCK_MAX)
1127 goto out_illegal;
1128
1129 /* If we are trying to override a permanent protocol, bail. */
1130 last_perm = &inetsw[p->type];
1131 list_for_each(lh, &inetsw[p->type]) {
1132 answer = list_entry(lh, struct inet_protosw, list);
1133 /* Check only the non-wild match. */
1134 if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
1135 break;
1136 if (protocol == answer->protocol)
1137 goto out_permanent;
1138 last_perm = lh;
1139 }
1140
1141 /* Add the new entry after the last permanent entry if any, so that
1142 * the new entry does not override a permanent entry when matched with
1143 * a wild-card protocol. But it is allowed to override any existing
1144 * non-permanent entry. This means that when we remove this entry, the
1145 * system automatically returns to the old behavior.
1146 */
1147 list_add_rcu(&p->list, last_perm);
1148out:
1149 spin_unlock_bh(&inetsw_lock);
1150
1151 return;
1152
1153out_permanent:
1154 pr_err("Attempt to override permanent protocol %d\n", protocol);
1155 goto out;
1156
1157out_illegal:
1158 pr_err("Ignoring attempt to register invalid socket type %d\n",
1159 p->type);
1160 goto out;
1161}
1162EXPORT_SYMBOL(inet_register_protosw);
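
/* For illustration only: a protocol module would typically register a
 * non-permanent entry like this (sketch; the "foo" names are
 * hypothetical):
 *
 *	static struct inet_protosw foo_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_FOO,	// hypothetical number
 *		.prot     = &foo_prot,
 *		.ops      = &inet_stream_ops,
 *		.flags    = 0,			// non-permanent, so it can
 *						// be unregistered again
 *	};
 *
 *	inet_register_protosw(&foo_protosw);
 */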
1163
1164void inet_unregister_protosw(struct inet_protosw *p)
1165{
1166 if (INET_PROTOSW_PERMANENT & p->flags) {
1167 pr_err("Attempt to unregister permanent protocol %d\n",
1168 p->protocol);
1169 } else {
1170 spin_lock_bh(&inetsw_lock);
1171 list_del_rcu(&p->list);
1172 spin_unlock_bh(&inetsw_lock);
1173
1174 synchronize_net();
1175 }
1176}
1177EXPORT_SYMBOL(inet_unregister_protosw);
1178
1179static int inet_sk_reselect_saddr(struct sock *sk)
1180{
1181 struct inet_sock *inet = inet_sk(sk);
1182 __be32 old_saddr = inet->inet_saddr;
1183 __be32 daddr = inet->inet_daddr;
1184 struct flowi4 *fl4;
1185 struct rtable *rt;
1186 __be32 new_saddr;
1187 struct ip_options_rcu *inet_opt;
1188
1189 inet_opt = rcu_dereference_protected(inet->inet_opt,
1190 lockdep_sock_is_held(sk));
1191 if (inet_opt && inet_opt->opt.srr)
1192 daddr = inet_opt->opt.faddr;
1193
1194 /* Query new route. */
1195 fl4 = &inet->cork.fl.u.ip4;
1196 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
1197 sk->sk_bound_dev_if, sk->sk_protocol,
1198 inet->inet_sport, inet->inet_dport, sk);
1199 if (IS_ERR(rt))
1200 return PTR_ERR(rt);
1201
1202 sk_setup_caps(sk, &rt->dst);
1203
1204 new_saddr = fl4->saddr;
1205
1206 if (new_saddr == old_saddr)
1207 return 0;
1208
1209 if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) {
1210 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1211 __func__, &old_saddr, &new_saddr);
1212 }
1213
1214 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
1215
1216 /*
1217	 * XXX The only ugly spot where we need to
1218 * XXX really change the sockets identity after
1219 * XXX it has entered the hashes. -DaveM
1220 *
1221 * Besides that, it does not check for connection
1222 * uniqueness. Wait for troubles.
1223 */
1224 return __sk_prot_rehash(sk);
1225}
1226
1227int inet_sk_rebuild_header(struct sock *sk)
1228{
1229 struct inet_sock *inet = inet_sk(sk);
1230 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1231 __be32 daddr;
1232 struct ip_options_rcu *inet_opt;
1233 struct flowi4 *fl4;
1234 int err;
1235
1236 /* Route is OK, nothing to do. */
1237 if (rt)
1238 return 0;
1239
1240 /* Reroute. */
1241 rcu_read_lock();
1242 inet_opt = rcu_dereference(inet->inet_opt);
1243 daddr = inet->inet_daddr;
1244 if (inet_opt && inet_opt->opt.srr)
1245 daddr = inet_opt->opt.faddr;
1246 rcu_read_unlock();
1247 fl4 = &inet->cork.fl.u.ip4;
1248 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
1249 inet->inet_dport, inet->inet_sport,
1250 sk->sk_protocol, RT_CONN_FLAGS(sk),
1251 sk->sk_bound_dev_if);
1252 if (!IS_ERR(rt)) {
1253 err = 0;
1254 sk_setup_caps(sk, &rt->dst);
1255 } else {
1256 err = PTR_ERR(rt);
1257
1258 /* Routing failed... */
1259 sk->sk_route_caps = 0;
1260 /*
1261		 * Other protocols have to map their equivalent state to TCP_SYN_SENT.
1262 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
1263 */
1264 if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr ||
1265 sk->sk_state != TCP_SYN_SENT ||
1266 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1267 (err = inet_sk_reselect_saddr(sk)) != 0)
1268 sk->sk_err_soft = -err;
1269 }
1270
1271 return err;
1272}
1273EXPORT_SYMBOL(inet_sk_rebuild_header);
1274
1275void inet_sk_set_state(struct sock *sk, int state)
1276{
1277 trace_inet_sock_set_state(sk, sk->sk_state, state);
1278 sk->sk_state = state;
1279}
1280EXPORT_SYMBOL(inet_sk_set_state);
1281
1282void inet_sk_state_store(struct sock *sk, int newstate)
1283{
1284 trace_inet_sock_set_state(sk, sk->sk_state, newstate);
1285 smp_store_release(&sk->sk_state, newstate);
1286}
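
/* For illustration only: the smp_store_release() above pairs with an
 * acquire load on lockless readers (see inet_sk_state_load() in
 * include/net/inet_sock.h):
 *
 *	int state = smp_load_acquire(&sk->sk_state);
 */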
1287
1288struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1289 netdev_features_t features)
1290{
1291 bool udpfrag = false, fixedid = false, gso_partial, encap;
1292 struct sk_buff *segs = ERR_PTR(-EINVAL);
1293 const struct net_offload *ops;
1294 unsigned int offset = 0;
1295 struct iphdr *iph;
1296 int proto, tot_len;
1297 int nhoff;
1298 int ihl;
1299 int id;
1300
1301 skb_reset_network_header(skb);
1302 nhoff = skb_network_header(skb) - skb_mac_header(skb);
1303 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1304 goto out;
1305
1306 iph = ip_hdr(skb);
1307 ihl = iph->ihl * 4;
1308 if (ihl < sizeof(*iph))
1309 goto out;
1310
1311 id = ntohs(iph->id);
1312 proto = iph->protocol;
1313
1314	/* Warning: after this point, iph might no longer be valid */
1315 if (unlikely(!pskb_may_pull(skb, ihl)))
1316 goto out;
1317 __skb_pull(skb, ihl);
1318
1319 encap = SKB_GSO_CB(skb)->encap_level > 0;
1320 if (encap)
1321 features &= skb->dev->hw_enc_features;
1322 SKB_GSO_CB(skb)->encap_level += ihl;
1323
1324 skb_reset_transport_header(skb);
1325
1326 segs = ERR_PTR(-EPROTONOSUPPORT);
1327
1328 if (!skb->encapsulation || encap) {
1329 udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
1330 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
1331
1332 /* fixed ID is invalid if DF bit is not set */
1333 if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
1334 goto out;
1335 }
1336
1337 ops = rcu_dereference(inet_offloads[proto]);
1338 if (likely(ops && ops->callbacks.gso_segment))
1339 segs = ops->callbacks.gso_segment(skb, features);
1340
1341 if (IS_ERR_OR_NULL(segs))
1342 goto out;
1343
1344 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1345
1346 skb = segs;
1347 do {
1348 iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1349 if (udpfrag) {
1350 iph->frag_off = htons(offset >> 3);
1351 if (skb->next)
1352 iph->frag_off |= htons(IP_MF);
1353 offset += skb->len - nhoff - ihl;
1354 tot_len = skb->len - nhoff;
1355 } else if (skb_is_gso(skb)) {
1356 if (!fixedid) {
1357 iph->id = htons(id);
1358 id += skb_shinfo(skb)->gso_segs;
1359 }
1360
1361 if (gso_partial)
1362 tot_len = skb_shinfo(skb)->gso_size +
1363 SKB_GSO_CB(skb)->data_offset +
1364 skb->head - (unsigned char *)iph;
1365 else
1366 tot_len = skb->len - nhoff;
1367 } else {
1368 if (!fixedid)
1369 iph->id = htons(id++);
1370 tot_len = skb->len - nhoff;
1371 }
1372 iph->tot_len = htons(tot_len);
1373 ip_send_check(iph);
1374 if (encap)
1375 skb_reset_inner_headers(skb);
1376 skb->network_header = (u8 *)iph - skb->head;
1377 } while ((skb = skb->next));
1378
1379out:
1380 return segs;
1381}
1382EXPORT_SYMBOL(inet_gso_segment);
1383
1384struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
1385{
1386 const struct net_offload *ops;
1387 struct sk_buff **pp = NULL;
1388 struct sk_buff *p;
1389 const struct iphdr *iph;
1390 unsigned int hlen;
1391 unsigned int off;
1392 unsigned int id;
1393 int flush = 1;
1394 int proto;
1395
1396 off = skb_gro_offset(skb);
1397 hlen = off + sizeof(*iph);
1398 iph = skb_gro_header_fast(skb, off);
1399 if (skb_gro_header_hard(skb, hlen)) {
1400 iph = skb_gro_header_slow(skb, hlen, off);
1401 if (unlikely(!iph))
1402 goto out;
1403 }
1404
1405 proto = iph->protocol;
1406
1407 rcu_read_lock();
1408 ops = rcu_dereference(inet_offloads[proto]);
1409 if (!ops || !ops->callbacks.gro_receive)
1410 goto out_unlock;
1411
1412 if (*(u8 *)iph != 0x45)
1413 goto out_unlock;
1414
1415 if (ip_is_fragment(iph))
1416 goto out_unlock;
1417
1418 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1419 goto out_unlock;
1420
1421 id = ntohl(*(__be32 *)&iph->id);
1422 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
1423 id >>= 16;
1424
1425 for (p = *head; p; p = p->next) {
1426 struct iphdr *iph2;
1427 u16 flush_id;
1428
1429 if (!NAPI_GRO_CB(p)->same_flow)
1430 continue;
1431
1432 iph2 = (struct iphdr *)(p->data + off);
1433 /* The above works because, with the exception of the top
1434		 * (innermost) layer, we only aggregate pkts with the same
1435 * hdr length so all the hdrs we'll need to verify will start
1436 * at the same offset.
1437 */
1438 if ((iph->protocol ^ iph2->protocol) |
1439 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1440 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1441 NAPI_GRO_CB(p)->same_flow = 0;
1442 continue;
1443 }
1444
1445 /* All fields must match except length and checksum. */
1446 NAPI_GRO_CB(p)->flush |=
1447 (iph->ttl ^ iph2->ttl) |
1448 (iph->tos ^ iph2->tos) |
1449 ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
1450
1451 NAPI_GRO_CB(p)->flush |= flush;
1452
1453		 * We need to store the IP ID check to be included later
1454 * when we can verify that this packet does in fact belong
1455 * to a given flow.
1456 */
1457 flush_id = (u16)(id - ntohs(iph2->id));
1458
1459 /* This bit of code makes it much easier for us to identify
1460 * the cases where we are doing atomic vs non-atomic IP ID
1461 * checks. Specifically an atomic check can return IP ID
1462 * values 0 - 0xFFFF, while a non-atomic check can only
1463 * return 0 or 0xFFFF.
1464 */
1465 if (!NAPI_GRO_CB(p)->is_atomic ||
1466 !(iph->frag_off & htons(IP_DF))) {
1467 flush_id ^= NAPI_GRO_CB(p)->count;
1468 flush_id = flush_id ? 0xFFFF : 0;
1469 }
1470
1471 /* If the previous IP ID value was based on an atomic
1472 * datagram we can overwrite the value and ignore it.
1473 */
1474 if (NAPI_GRO_CB(skb)->is_atomic)
1475 NAPI_GRO_CB(p)->flush_id = flush_id;
1476 else
1477 NAPI_GRO_CB(p)->flush_id |= flush_id;
1478 }
1479
1480 NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
1481 NAPI_GRO_CB(skb)->flush |= flush;
1482 skb_set_network_header(skb, off);
1483 /* The above will be needed by the transport layer if there is one
1484 * immediately following this IP hdr.
1485 */
1486
1487 /* Note : No need to call skb_gro_postpull_rcsum() here,
1488 * as we already checked checksum over ipv4 header was 0
1489 */
1490 skb_gro_pull(skb, sizeof(*iph));
1491 skb_set_transport_header(skb, skb_gro_offset(skb));
1492
1493 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
1494
1495out_unlock:
1496 rcu_read_unlock();
1497
1498out:
1499 skb_gro_flush_final(skb, pp, flush);
1500
1501 return pp;
1502}
1503EXPORT_SYMBOL(inet_gro_receive);
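
/* For illustration only, a worked example of the non-atomic ID check
 * above: suppose p already holds 3 segments (NAPI_GRO_CB(p)->count ==
 * 3) and its first header carried id 1000, so the next in-order
 * datagram should carry id 1003.  Then:
 *
 *	flush_id = 1003 - 1000;		// id - ntohs(iph2->id) == 3
 *	flush_id ^= 3;			// ^= count -> 0: same flow
 *
 * An out-of-order id such as 1005 gives 5 ^ 3 != 0, which is squashed
 * to 0xFFFF and forces a flush when the stored check is later applied.
 */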
1504
1505static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
1506 struct sk_buff *skb)
1507{
1508 if (NAPI_GRO_CB(skb)->encap_mark) {
1509 NAPI_GRO_CB(skb)->flush = 1;
1510 return NULL;
1511 }
1512
1513 NAPI_GRO_CB(skb)->encap_mark = 1;
1514
1515 return inet_gro_receive(head, skb);
1516}
1517
1518#define SECONDS_PER_DAY 86400
1519
1520/* inet_current_timestamp - Return IP network timestamp
1521 *
1522 * Return milliseconds since midnight in network byte order.
1523 */
1524__be32 inet_current_timestamp(void)
1525{
1526 u32 secs;
1527 u32 msecs;
1528 struct timespec64 ts;
1529
1530 ktime_get_real_ts64(&ts);
1531
1532 /* Get secs since midnight. */
1533 (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
1534 /* Convert to msecs. */
1535 msecs = secs * MSEC_PER_SEC;
1536 /* Convert nsec to msec. */
1537 msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
1538
1539 /* Convert to network byte order. */
1540 return htonl(msecs);
1541}
1542EXPORT_SYMBOL(inet_current_timestamp);
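
/* For illustration only, a worked example of the conversion above:
 * at 12:00:00.500 UTC there are 43200 seconds since midnight, so
 *
 *	msecs = 43200 * MSEC_PER_SEC + 500000000 / NSEC_PER_MSEC
 *	      = 43200000 + 500 = 43200500
 *
 * and htonl(43200500) is the value placed in e.g. ICMP timestamp
 * replies.
 */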
1543
1544int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1545{
1546 if (sk->sk_family == AF_INET)
1547 return ip_recv_error(sk, msg, len, addr_len);
1548#if IS_ENABLED(CONFIG_IPV6)
1549 if (sk->sk_family == AF_INET6)
1550 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1551#endif
1552 return -EINVAL;
1553}
1554
1555int inet_gro_complete(struct sk_buff *skb, int nhoff)
1556{
1557 __be16 newlen = htons(skb->len - nhoff);
1558 struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1559 const struct net_offload *ops;
1560 int proto = iph->protocol;
1561 int err = -ENOSYS;
1562
1563 if (skb->encapsulation) {
1564 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1565 skb_set_inner_network_header(skb, nhoff);
1566 }
1567
1568 csum_replace2(&iph->check, iph->tot_len, newlen);
1569 iph->tot_len = newlen;
1570
1571 rcu_read_lock();
1572 ops = rcu_dereference(inet_offloads[proto]);
1573 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1574 goto out_unlock;
1575
1576 /* Only need to add sizeof(*iph) to get to the next hdr below
1577	 * because any hdr with options will have been flushed in
1578 * inet_gro_receive().
1579 */
1580 err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
1581
1582out_unlock:
1583 rcu_read_unlock();
1584
1585 return err;
1586}
1587EXPORT_SYMBOL(inet_gro_complete);
1588
1589static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1590{
1591 skb->encapsulation = 1;
1592 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1593 return inet_gro_complete(skb, nhoff);
1594}
1595
1596int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1597 unsigned short type, unsigned char protocol,
1598 struct net *net)
1599{
1600 struct socket *sock;
1601 int rc = sock_create_kern(net, family, type, protocol, &sock);
1602
1603 if (rc == 0) {
1604 *sk = sock->sk;
1605 (*sk)->sk_allocation = GFP_ATOMIC;
1606 /*
1607		 * Unhash it so that IP input processing does not even see it;
1608		 * we do not wish this socket to see incoming packets.
1609 */
1610 (*sk)->sk_prot->unhash(*sk);
1611 }
1612 return rc;
1613}
1614EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
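
/* For illustration only: this is how per-netns control sockets get
 * created, e.g. the raw socket ICMP uses for sending errors (sketch
 * along the lines of icmp_sk_init()):
 *
 *	struct sock *sk;
 *	int err = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
 *				       IPPROTO_ICMP, net);
 *	if (err)
 *		return err;
 */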
1615
1616u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
1617{
1618 return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
1619}
1620EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
1621
1622unsigned long snmp_fold_field(void __percpu *mib, int offt)
1623{
1624 unsigned long res = 0;
1625 int i;
1626
1627 for_each_possible_cpu(i)
1628 res += snmp_get_cpu_field(mib, i, offt);
1629 return res;
1630}
1631EXPORT_SYMBOL_GPL(snmp_fold_field);
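
/* For illustration only: a typical consumer sums one MIB counter over
 * all possible cpus, e.g. (sketch of what the /proc/net/snmp code
 * does):
 *
 *	unsigned long opens = snmp_fold_field(net->mib.tcp_statistics,
 *					      TCP_MIB_ACTIVEOPENS);
 */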
1632
1633#if BITS_PER_LONG==32
1634
1635u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1636 size_t syncp_offset)
1637{
1638 void *bhptr;
1639 struct u64_stats_sync *syncp;
1640 u64 v;
1641 unsigned int start;
1642
1643 bhptr = per_cpu_ptr(mib, cpu);
1644 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1645 do {
1646 start = u64_stats_fetch_begin_irq(syncp);
1647 v = *(((u64 *)bhptr) + offt);
1648 } while (u64_stats_fetch_retry_irq(syncp, start));
1649
1650 return v;
1651}
1652EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1653
1654u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1655{
1656 u64 res = 0;
1657 int cpu;
1658
1659 for_each_possible_cpu(cpu) {
1660 res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1661 }
1662 return res;
1663}
1664EXPORT_SYMBOL_GPL(snmp_fold_field64);
1665#endif
1666
1667#ifdef CONFIG_IP_MULTICAST
1668static const struct net_protocol igmp_protocol = {
1669 .handler = igmp_rcv,
1670 .netns_ok = 1,
1671};
1672#endif
1673
1674/* thinking of making this const? Don't.
1675 * early_demux can change based on sysctl.
1676 */
1677static struct net_protocol tcp_protocol = {
1678 .early_demux = tcp_v4_early_demux,
1679 .early_demux_handler = tcp_v4_early_demux,
1680 .handler = tcp_v4_rcv,
1681 .err_handler = tcp_v4_err,
1682 .no_policy = 1,
1683 .netns_ok = 1,
1684 .icmp_strict_tag_validation = 1,
1685};
1686
1687/* thinking of making this const? Don't.
1688 * early_demux can change based on sysctl.
1689 */
1690static struct net_protocol udp_protocol = {
1691 .early_demux = udp_v4_early_demux,
1692 .early_demux_handler = udp_v4_early_demux,
1693 .handler = udp_rcv,
1694 .err_handler = udp_err,
1695 .no_policy = 1,
1696 .netns_ok = 1,
1697};
1698
1699static const struct net_protocol icmp_protocol = {
1700 .handler = icmp_rcv,
1701 .err_handler = icmp_err,
1702 .no_policy = 1,
1703 .netns_ok = 1,
1704};
1705
1706static __net_init int ipv4_mib_init_net(struct net *net)
1707{
1708 int i;
1709
1710 net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1711 if (!net->mib.tcp_statistics)
1712 goto err_tcp_mib;
1713 net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1714 if (!net->mib.ip_statistics)
1715 goto err_ip_mib;
1716
1717 for_each_possible_cpu(i) {
1718 struct ipstats_mib *af_inet_stats;
1719 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1720 u64_stats_init(&af_inet_stats->syncp);
1721 }
1722
1723 net->mib.net_statistics = alloc_percpu(struct linux_mib);
1724 if (!net->mib.net_statistics)
1725 goto err_net_mib;
1726 net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1727 if (!net->mib.udp_statistics)
1728 goto err_udp_mib;
1729 net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1730 if (!net->mib.udplite_statistics)
1731 goto err_udplite_mib;
1732 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1733 if (!net->mib.icmp_statistics)
1734 goto err_icmp_mib;
1735 net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
1736 GFP_KERNEL);
1737 if (!net->mib.icmpmsg_statistics)
1738 goto err_icmpmsg_mib;
1739
1740 tcp_mib_init(net);
1741 return 0;
1742
1743err_icmpmsg_mib:
1744 free_percpu(net->mib.icmp_statistics);
1745err_icmp_mib:
1746 free_percpu(net->mib.udplite_statistics);
1747err_udplite_mib:
1748 free_percpu(net->mib.udp_statistics);
1749err_udp_mib:
1750 free_percpu(net->mib.net_statistics);
1751err_net_mib:
1752 free_percpu(net->mib.ip_statistics);
1753err_ip_mib:
1754 free_percpu(net->mib.tcp_statistics);
1755err_tcp_mib:
1756 return -ENOMEM;
1757}
1758
1759static __net_exit void ipv4_mib_exit_net(struct net *net)
1760{
1761 kfree(net->mib.icmpmsg_statistics);
1762 free_percpu(net->mib.icmp_statistics);
1763 free_percpu(net->mib.udplite_statistics);
1764 free_percpu(net->mib.udp_statistics);
1765 free_percpu(net->mib.net_statistics);
1766 free_percpu(net->mib.ip_statistics);
1767 free_percpu(net->mib.tcp_statistics);
1768}
1769
1770static __net_initdata struct pernet_operations ipv4_mib_ops = {
1771 .init = ipv4_mib_init_net,
1772 .exit = ipv4_mib_exit_net,
1773};
1774
1775static int __init init_ipv4_mibs(void)
1776{
1777 return register_pernet_subsys(&ipv4_mib_ops);
1778}
1779
1780static __net_init int inet_init_net(struct net *net)
1781{
1782 /*
1783 * Set defaults for local port range
1784 */
1785 seqlock_init(&net->ipv4.ip_local_ports.lock);
1786 net->ipv4.ip_local_ports.range[0] = 32768;
1787 net->ipv4.ip_local_ports.range[1] = 60999;
1788
1789 seqlock_init(&net->ipv4.ping_group_range.lock);
1790 /*
1791 * Sane defaults - nobody may create ping sockets.
1792	 * Boot scripts should set this to a distro-specific group.
1793 */
1794 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1795 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1796
1797 /* Default values for sysctl-controlled parameters.
1798 * We set them here, in case sysctl is not compiled.
1799 */
1800 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1801 net->ipv4.sysctl_ip_dynaddr = 0;
1802 net->ipv4.sysctl_ip_early_demux = 1;
1803 net->ipv4.sysctl_udp_early_demux = 1;
1804 net->ipv4.sysctl_tcp_early_demux = 1;
1805#ifdef CONFIG_SYSCTL
1806 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1807#endif
1808
1809	/* Some igmp sysctls, whose values are always used */
1810 net->ipv4.sysctl_igmp_max_memberships = 20;
1811 net->ipv4.sysctl_igmp_max_msf = 10;
1812 /* IGMP reports for link-local multicast groups are enabled by default */
1813 net->ipv4.sysctl_igmp_llm_reports = 1;
1814 net->ipv4.sysctl_igmp_qrv = 2;
1815
1816 return 0;
1817}
1818
1819static __net_exit void inet_exit_net(struct net *net)
1820{
1821}
1822
1823static __net_initdata struct pernet_operations af_inet_ops = {
1824 .init = inet_init_net,
1825 .exit = inet_exit_net,
1826};
1827
1828static int __init init_inet_pernet_ops(void)
1829{
1830 return register_pernet_subsys(&af_inet_ops);
1831}
1832
1833static int ipv4_proc_init(void);
1834
1835/*
1836 * IP protocol layer initialiser
1837 */
1838
1839static struct packet_offload ip_packet_offload __read_mostly = {
1840 .type = cpu_to_be16(ETH_P_IP),
1841 .callbacks = {
1842 .gso_segment = inet_gso_segment,
1843 .gro_receive = inet_gro_receive,
1844 .gro_complete = inet_gro_complete,
1845 },
1846};
1847
1848static const struct net_offload ipip_offload = {
1849 .callbacks = {
1850 .gso_segment = inet_gso_segment,
1851 .gro_receive = ipip_gro_receive,
1852 .gro_complete = ipip_gro_complete,
1853 },
1854};
1855
1856static int __init ipip_offload_init(void)
1857{
1858 return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1859}
1860
1861static int __init ipv4_offload_init(void)
1862{
1863 /*
1864 * Add offloads
1865 */
1866 if (udpv4_offload_init() < 0)
1867 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1868 if (tcpv4_offload_init() < 0)
1869 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1870 if (ipip_offload_init() < 0)
1871 pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1872
1873 dev_add_offload(&ip_packet_offload);
1874 return 0;
1875}
1876
1877fs_initcall(ipv4_offload_init);
1878
1879static struct packet_type ip_packet_type __read_mostly = {
1880 .type = cpu_to_be16(ETH_P_IP),
1881 .func = ip_rcv,
1882};
1883
1884static int __init inet_init(void)
1885{
1886 struct inet_protosw *q;
1887 struct list_head *r;
1888 int rc = -EINVAL;
1889
1890 sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1891
1892 rc = proto_register(&tcp_prot, 1);
1893 if (rc)
1894 goto out;
1895
1896 rc = proto_register(&udp_prot, 1);
1897 if (rc)
1898 goto out_unregister_tcp_proto;
1899
1900 rc = proto_register(&raw_prot, 1);
1901 if (rc)
1902 goto out_unregister_udp_proto;
1903
1904 rc = proto_register(&ping_prot, 1);
1905 if (rc)
1906 goto out_unregister_raw_proto;
1907
1908 /*
1909 * Tell SOCKET that we are alive...
1910 */
1911
1912 (void)sock_register(&inet_family_ops);
1913
1914#ifdef CONFIG_SYSCTL
1915 ip_static_sysctl_init();
1916#endif
1917
1918 /*
1919 * Add all the base protocols.
1920 */
1921
1922 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1923 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1924 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
1925 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1926 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
1927 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1928#ifdef CONFIG_IP_MULTICAST
1929 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1930 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1931#endif
1932
1933 /* Register the socket-side information for inet_create. */
1934 for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1935 INIT_LIST_HEAD(r);
1936
1937 for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1938 inet_register_protosw(q);
1939
1940 /*
1941 * Set the ARP module up
1942 */
1943
1944 arp_init();
1945
1946 /*
1947 * Set the IP module up
1948 */
1949
1950 ip_init();
1951
1952 /* Setup TCP slab cache for open requests. */
1953 tcp_init();
1954
1955 /* Setup UDP memory threshold */
1956 udp_init();
1957
1958 /* Add UDP-Lite (RFC 3828) */
1959 udplite4_register();
1960
1961 ping_init();
1962
1963 /*
1964 * Set the ICMP layer up
1965 */
1966
1967 if (icmp_init() < 0)
1968 panic("Failed to create the ICMP control socket.\n");
1969
1970 /*
1971 * Initialise the multicast router
1972 */
1973#if defined(CONFIG_IP_MROUTE)
1974 if (ip_mr_init())
1975 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1976#endif
1977
1978 if (init_inet_pernet_ops())
1979 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
1980 /*
1981 * Initialise per-cpu ipv4 mibs
1982 */
1983
1984 if (init_ipv4_mibs())
1985 pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
1986
1987 ipv4_proc_init();
1988
1989 ipfrag_init();
1990
1991 dev_add_pack(&ip_packet_type);
1992
1993 ip_tunnel_core_init();
1994
1995 rc = 0;
1996out:
1997 return rc;
1998out_unregister_raw_proto:
1999 proto_unregister(&raw_prot);
2000out_unregister_udp_proto:
2001 proto_unregister(&udp_prot);
2002out_unregister_tcp_proto:
2003 proto_unregister(&tcp_prot);
2004 goto out;
2005}
2006
2007fs_initcall(inet_init);
2008
2009/* ------------------------------------------------------------------------ */
2010
2011#ifdef CONFIG_PROC_FS
2012static int __init ipv4_proc_init(void)
2013{
2014 int rc = 0;
2015
2016 if (raw_proc_init())
2017 goto out_raw;
2018 if (tcp4_proc_init())
2019 goto out_tcp;
2020 if (udp4_proc_init())
2021 goto out_udp;
2022 if (ping_proc_init())
2023 goto out_ping;
2024 if (ip_misc_proc_init())
2025 goto out_misc;
2026out:
2027 return rc;
2028out_misc:
2029 ping_proc_exit();
2030out_ping:
2031 udp4_proc_exit();
2032out_udp:
2033 tcp4_proc_exit();
2034out_tcp:
2035 raw_proc_exit();
2036out_raw:
2037 rc = -ENOMEM;
2038 goto out;
2039}
2040
2041#else /* CONFIG_PROC_FS */
2042static int __init ipv4_proc_init(void)
2043{
2044 return 0;
2045}
2046#endif /* CONFIG_PROC_FS */