1// SPDX-License-Identifier: GPL-2.0
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The IP to API glue.
8 *
9 * Authors: see ip.c
10 *
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/mm.h>
22#include <linux/skbuff.h>
23#include <linux/ip.h>
24#include <linux/icmp.h>
25#include <linux/inetdevice.h>
26#include <linux/netdevice.h>
27#include <linux/slab.h>
28#include <net/sock.h>
29#include <net/ip.h>
30#include <net/icmp.h>
31#include <net/tcp_states.h>
32#include <linux/udp.h>
33#include <linux/igmp.h>
34#include <linux/netfilter.h>
35#include <linux/route.h>
36#include <linux/mroute.h>
37#include <net/inet_ecn.h>
38#include <net/route.h>
39#include <net/xfrm.h>
40#include <net/compat.h>
41#include <net/checksum.h>
42#if IS_ENABLED(CONFIG_IPV6)
43#include <net/transp_v6.h>
44#endif
45#include <net/ip_fib.h>
46
47#include <linux/errqueue.h>
48#include <linux/uaccess.h>
49
50/*
51 * SOL_IP control messages.
52 */
53
54static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
55{
56 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
57
58 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
59
60 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
61}
62
63static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
64{
65 int ttl = ip_hdr(skb)->ttl;
66 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
67}
68
69static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
70{
71 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
72}
73
74static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
75{
76 if (IPCB(skb)->opt.optlen == 0)
77 return;
78
79 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
80 ip_hdr(skb) + 1);
81}
82
83
84static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
85 struct sk_buff *skb)
86{
87 unsigned char optbuf[sizeof(struct ip_options) + 40];
88 struct ip_options *opt = (struct ip_options *)optbuf;
89
90 if (IPCB(skb)->opt.optlen == 0)
91 return;
92
93 if (ip_options_echo(net, opt, skb)) {
94 msg->msg_flags |= MSG_CTRUNC;
95 return;
96 }
97 ip_options_undo(opt);
98
99 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
100}
101
102static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
103{
104 int val;
105
106 if (IPCB(skb)->frag_max_size == 0)
107 return;
108
109 val = IPCB(skb)->frag_max_size;
110 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
111}
112
113static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
114 int tlen, int offset)
115{
116 __wsum csum = skb->csum;
117
118 if (skb->ip_summed != CHECKSUM_COMPLETE)
119 return;
120
121 if (offset != 0) {
122 int tend_off = skb_transport_offset(skb) + tlen;
123 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
124 }
125
126 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
127}
128
129static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
130{
131 char *secdata;
132 u32 seclen, secid;
133 int err;
134
135 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
136 if (err)
137 return;
138
139 err = security_secid_to_secctx(secid, &secdata, &seclen);
140 if (err)
141 return;
142
143 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
144 security_release_secctx(secdata, seclen);
145}
146
147static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
148{
149 __be16 _ports[2], *ports;
150 struct sockaddr_in sin;
151
152 /* All current transport protocols have the port numbers in the
153 * first four bytes of the transport header and this function is
154 * written with this assumption in mind.
155 */
156 ports = skb_header_pointer(skb, skb_transport_offset(skb),
157 sizeof(_ports), &_ports);
158 if (!ports)
159 return;
160
161 sin.sin_family = AF_INET;
162 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
163 sin.sin_port = ports[1];
164 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
165
166 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
167}
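
/*
 * Illustrative userspace sketch (not part of the kernel build): roughly
 * how a receiver asks for the original destination address that
 * ip_cmsg_recv_dstaddr() reports, e.g. on a transparently redirected
 * UDP socket.  "fd" is an already bound datagram socket; error handling
 * is elided.
 *
 *	int on = 1;
 *	char data[1500], cbuf[CMSG_SPACE(sizeof(struct sockaddr_in))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_IP &&
 *		    cm->cmsg_type == IP_ORIGDSTADDR) {
 *			struct sockaddr_in orig;
 *
 *			memcpy(&orig, CMSG_DATA(cm), sizeof(orig));
 *			(orig.sin_addr / orig.sin_port hold the original dst)
 *		}
 */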
168
169void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
170 struct sk_buff *skb, int tlen, int offset)
171{
172 unsigned long flags = inet_cmsg_flags(inet_sk(sk));
173
174 if (!flags)
175 return;
176
177 /* Ordered by supposed usage frequency */
178 if (flags & IP_CMSG_PKTINFO) {
179 ip_cmsg_recv_pktinfo(msg, skb);
180
181 flags &= ~IP_CMSG_PKTINFO;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_TTL) {
187 ip_cmsg_recv_ttl(msg, skb);
188
189 flags &= ~IP_CMSG_TTL;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_TOS) {
195 ip_cmsg_recv_tos(msg, skb);
196
197 flags &= ~IP_CMSG_TOS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_RECVOPTS) {
203 ip_cmsg_recv_opts(msg, skb);
204
205 flags &= ~IP_CMSG_RECVOPTS;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_RETOPTS) {
211 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
212
213 flags &= ~IP_CMSG_RETOPTS;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_PASSSEC) {
219 ip_cmsg_recv_security(msg, skb);
220
221 flags &= ~IP_CMSG_PASSSEC;
222 if (!flags)
223 return;
224 }
225
226 if (flags & IP_CMSG_ORIGDSTADDR) {
227 ip_cmsg_recv_dstaddr(msg, skb);
228
229 flags &= ~IP_CMSG_ORIGDSTADDR;
230 if (!flags)
231 return;
232 }
233
234 if (flags & IP_CMSG_CHECKSUM)
235 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
236
237 if (flags & IP_CMSG_RECVFRAGSIZE)
238 ip_cmsg_recv_fragsize(msg, skb);
239}
240EXPORT_SYMBOL(ip_cmsg_recv_offset);
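
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * IP_CMSG_* flags tested above are enabled with the matching socket
 * options, and the ancillary data built here is read back via
 * recvmsg().  "msg" is a struct msghdr whose msg_control/msg_controllen
 * describe a CMSG_SPACE()-sized buffer; its setup, the data iovec and
 * error handling are elided.
 *
 *	int on = 1;
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, SOL_IP, IP_PKTINFO, &on, sizeof(on));
 *	setsockopt(fd, SOL_IP, IP_RECVTTL, &on, sizeof(on));
 *	setsockopt(fd, SOL_IP, IP_RECVTOS, &on, sizeof(on));
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != SOL_IP)
 *			continue;
 *		if (cm->cmsg_type == IP_PKTINFO)
 *			(CMSG_DATA(cm) is a struct in_pktinfo)
 *		else if (cm->cmsg_type == IP_TTL)
 *			(CMSG_DATA(cm) is an int)
 *		else if (cm->cmsg_type == IP_TOS)
 *			(CMSG_DATA(cm) is a single TOS byte)
 *	}
 */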
241
242int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
243 bool allow_ipv6)
244{
245 int err, val;
246 struct cmsghdr *cmsg;
247 struct net *net = sock_net(sk);
248
249 for_each_cmsghdr(cmsg, msg) {
250 if (!CMSG_OK(msg, cmsg))
251 return -EINVAL;
252#if IS_ENABLED(CONFIG_IPV6)
253 if (allow_ipv6 &&
254 cmsg->cmsg_level == SOL_IPV6 &&
255 cmsg->cmsg_type == IPV6_PKTINFO) {
256 struct in6_pktinfo *src_info;
257
258 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
259 return -EINVAL;
260 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
261 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
262 return -EINVAL;
263 if (src_info->ipi6_ifindex)
264 ipc->oif = src_info->ipi6_ifindex;
265 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
266 continue;
267 }
268#endif
269 if (cmsg->cmsg_level == SOL_SOCKET) {
270 err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
271 if (err)
272 return err;
273 continue;
274 }
275
276 if (cmsg->cmsg_level != SOL_IP)
277 continue;
278 switch (cmsg->cmsg_type) {
279 case IP_RETOPTS:
280 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
281
282 /* Our caller is responsible for freeing ipc->opt */
283 err = ip_options_get(net, &ipc->opt,
284 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
285 err < 40 ? err : 40);
286 if (err)
287 return err;
288 break;
289 case IP_PKTINFO:
290 {
291 struct in_pktinfo *info;
292 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
293 return -EINVAL;
294 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
295 if (info->ipi_ifindex)
296 ipc->oif = info->ipi_ifindex;
297 ipc->addr = info->ipi_spec_dst.s_addr;
298 break;
299 }
300 case IP_TTL:
301 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
302 return -EINVAL;
303 val = *(int *)CMSG_DATA(cmsg);
304 if (val < 1 || val > 255)
305 return -EINVAL;
306 ipc->ttl = val;
307 break;
308 case IP_TOS:
309 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
310 val = *(int *)CMSG_DATA(cmsg);
311 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
312 val = *(u8 *)CMSG_DATA(cmsg);
313 else
314 return -EINVAL;
315 if (val < 0 || val > 255)
316 return -EINVAL;
317 ipc->tos = val;
318 ipc->priority = rt_tos2priority(ipc->tos);
319 break;
320 case IP_PROTOCOL:
321 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
322 return -EINVAL;
323 val = *(int *)CMSG_DATA(cmsg);
324 if (val < 1 || val > 255)
325 return -EINVAL;
326 ipc->protocol = val;
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332 return 0;
333}
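
/*
 * Illustrative userspace sketch (not part of the kernel build): a sender
 * can attach the SOL_IP ancillary data parsed by ip_cmsg_send() to an
 * individual datagram.  The destination (.msg_name) and payload iovec
 * are elided, the TTL/TOS values are arbitrary, and error handling is
 * omitted.
 *
 *	char cbuf[CMSG_SPACE(sizeof(int)) * 2];
 *	struct msghdr msg = {
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	int ttl = 16, tos = IPTOS_LOWDELAY;
 *
 *	cm->cmsg_level = SOL_IP;
 *	cm->cmsg_type = IP_TTL;
 *	cm->cmsg_len = CMSG_LEN(sizeof(ttl));
 *	memcpy(CMSG_DATA(cm), &ttl, sizeof(ttl));
 *
 *	cm = CMSG_NXTHDR(&msg, cm);
 *	cm->cmsg_level = SOL_IP;
 *	cm->cmsg_type = IP_TOS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(tos));
 *	memcpy(CMSG_DATA(cm), &tos, sizeof(tos));
 *
 *	sendmsg(fd, &msg, 0);
 */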
334
335static void ip_ra_destroy_rcu(struct rcu_head *head)
336{
337 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
338
339 sock_put(ra->saved_sk);
340 kfree(ra);
341}
342
343int ip_ra_control(struct sock *sk, unsigned char on,
344 void (*destructor)(struct sock *))
345{
346 struct ip_ra_chain *ra, *new_ra;
347 struct ip_ra_chain __rcu **rap;
348 struct net *net = sock_net(sk);
349
350 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
351 return -EINVAL;
352
353 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
354 if (on && !new_ra)
355 return -ENOMEM;
356
357 mutex_lock(&net->ipv4.ra_mutex);
358 for (rap = &net->ipv4.ra_chain;
359 (ra = rcu_dereference_protected(*rap,
360 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
361 rap = &ra->next) {
362 if (ra->sk == sk) {
363 if (on) {
364 mutex_unlock(&net->ipv4.ra_mutex);
365 kfree(new_ra);
366 return -EADDRINUSE;
367 }
368		/* don't let ip_call_ra_chain() use sk again */
369 ra->sk = NULL;
370 RCU_INIT_POINTER(*rap, ra->next);
371 mutex_unlock(&net->ipv4.ra_mutex);
372
373 if (ra->destructor)
374 ra->destructor(sk);
375 /*
376			 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
377			 * period. This guarantees ip_call_ra_chain() doesn't need
378			 * to mess with socket refcounts.
379 */
380 ra->saved_sk = sk;
381 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
382 return 0;
383 }
384 }
385 if (!new_ra) {
386 mutex_unlock(&net->ipv4.ra_mutex);
387 return -ENOBUFS;
388 }
389 new_ra->sk = sk;
390 new_ra->destructor = destructor;
391
392 RCU_INIT_POINTER(new_ra->next, ra);
393 rcu_assign_pointer(*rap, new_ra);
394 sock_hold(sk);
395 mutex_unlock(&net->ipv4.ra_mutex);
396
397 return 0;
398}
399
400static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
401 struct sock_ee_data_rfc4884 *out)
402{
403 switch (icmp_hdr(skb)->type) {
404 case ICMP_DEST_UNREACH:
405 case ICMP_TIME_EXCEEDED:
406 case ICMP_PARAMETERPROB:
407 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
408 icmp_hdr(skb)->un.reserved[1] * 4);
409 }
410}
411
412void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
413 __be16 port, u32 info, u8 *payload)
414{
415 struct sock_exterr_skb *serr;
416
417 skb = skb_clone(skb, GFP_ATOMIC);
418 if (!skb)
419 return;
420
421 serr = SKB_EXT_ERR(skb);
422 serr->ee.ee_errno = err;
423 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
424 serr->ee.ee_type = icmp_hdr(skb)->type;
425 serr->ee.ee_code = icmp_hdr(skb)->code;
426 serr->ee.ee_pad = 0;
427 serr->ee.ee_info = info;
428 serr->ee.ee_data = 0;
429 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
430 skb_network_header(skb);
431 serr->port = port;
432
433 if (skb_pull(skb, payload - skb->data)) {
434 if (inet_test_bit(RECVERR_RFC4884, sk))
435 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
436
437 skb_reset_transport_header(skb);
438 if (sock_queue_err_skb(sk, skb) == 0)
439 return;
440 }
441 kfree_skb(skb);
442}
443EXPORT_SYMBOL_GPL(ip_icmp_error);
444
445void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
446{
447 struct sock_exterr_skb *serr;
448 struct iphdr *iph;
449 struct sk_buff *skb;
450
451 if (!inet_test_bit(RECVERR, sk))
452 return;
453
454 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
455 if (!skb)
456 return;
457
458 skb_put(skb, sizeof(struct iphdr));
459 skb_reset_network_header(skb);
460 iph = ip_hdr(skb);
461 iph->daddr = daddr;
462
463 serr = SKB_EXT_ERR(skb);
464 serr->ee.ee_errno = err;
465 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
466 serr->ee.ee_type = 0;
467 serr->ee.ee_code = 0;
468 serr->ee.ee_pad = 0;
469 serr->ee.ee_info = info;
470 serr->ee.ee_data = 0;
471 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
472 serr->port = port;
473
474 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
475 skb_reset_transport_header(skb);
476
477 if (sock_queue_err_skb(sk, skb))
478 kfree_skb(skb);
479}
480
481/* For some errors we have valid addr_offset even with zero payload and
482 * zero port. Also, addr_offset should be supported if port is set.
483 */
484static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
485{
486 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
487 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
488}
489
490/* IPv4 supports cmsg on all ICMP errors and some timestamps
491 *
492 * Timestamp code paths do not initialize the fields expected by cmsg:
493 * the PKTINFO fields in skb->cb[]. Fill those in here.
494 */
495static bool ipv4_datagram_support_cmsg(const struct sock *sk,
496 struct sk_buff *skb,
497 int ee_origin)
498{
499 struct in_pktinfo *info;
500
501 if (ee_origin == SO_EE_ORIGIN_ICMP)
502 return true;
503
504 if (ee_origin == SO_EE_ORIGIN_LOCAL)
505 return false;
506
507 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
508 * timestamp with egress dev. Not possible for packets without iif
509 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
510 */
511 info = PKTINFO_SKB_CB(skb);
512 if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
513 !info->ipi_ifindex)
514 return false;
515
516 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
517 return true;
518}
519
520/*
521 * Handle MSG_ERRQUEUE
522 */
523int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
524{
525 struct sock_exterr_skb *serr;
526 struct sk_buff *skb;
527 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
528 struct {
529 struct sock_extended_err ee;
530 struct sockaddr_in offender;
531 } errhdr;
532 int err;
533 int copied;
534
535 err = -EAGAIN;
536 skb = sock_dequeue_err_skb(sk);
537 if (!skb)
538 goto out;
539
540 copied = skb->len;
541 if (copied > len) {
542 msg->msg_flags |= MSG_TRUNC;
543 copied = len;
544 }
545 err = skb_copy_datagram_msg(skb, 0, msg, copied);
546 if (unlikely(err)) {
547 kfree_skb(skb);
548 return err;
549 }
550 sock_recv_timestamp(msg, sk, skb);
551
552 serr = SKB_EXT_ERR(skb);
553
554 if (sin && ipv4_datagram_support_addr(serr)) {
555 sin->sin_family = AF_INET;
556 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
557 serr->addr_offset);
558 sin->sin_port = serr->port;
559 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
560 *addr_len = sizeof(*sin);
561 }
562
563 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
564 sin = &errhdr.offender;
565 memset(sin, 0, sizeof(*sin));
566
567 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
568 sin->sin_family = AF_INET;
569 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
570 if (inet_cmsg_flags(inet_sk(sk)))
571 ip_cmsg_recv(msg, skb);
572 }
573
574 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
575
576 /* Now we could try to dump offended packet options */
577
578 msg->msg_flags |= MSG_ERRQUEUE;
579 err = copied;
580
581 consume_skb(skb);
582out:
583 return err;
584}
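
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * error queue drained by ip_recv_error() is consumed from userspace
 * with recvmsg(MSG_ERRQUEUE) once IP_RECVERR is enabled.  The msghdr
 * setup mirrors an ordinary recvmsg() call and is elided; a return of
 * -1 with errno EAGAIN simply means the queue is empty.
 *
 *	int on = 1;
 *	struct cmsghdr *cm;
 *	struct sock_extended_err *ee;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
 *			ee = (struct sock_extended_err *)CMSG_DATA(cm);
 *			(ee->ee_errno, ee->ee_type and ee->ee_code describe
 *			 the error; SO_EE_OFFENDER(ee) gives the offender)
 *		}
 */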
585
586void __ip_sock_set_tos(struct sock *sk, int val)
587{
588 u8 old_tos = inet_sk(sk)->tos;
589
590 if (sk->sk_type == SOCK_STREAM) {
591 val &= ~INET_ECN_MASK;
592 val |= old_tos & INET_ECN_MASK;
593 }
594 if (old_tos != val) {
595 WRITE_ONCE(inet_sk(sk)->tos, val);
596 WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
597 sk_dst_reset(sk);
598 }
599}
600
601void ip_sock_set_tos(struct sock *sk, int val)
602{
603 sockopt_lock_sock(sk);
604 __ip_sock_set_tos(sk, val);
605 sockopt_release_sock(sk);
606}
607EXPORT_SYMBOL(ip_sock_set_tos);
608
609void ip_sock_set_freebind(struct sock *sk)
610{
611 inet_set_bit(FREEBIND, sk);
612}
613EXPORT_SYMBOL(ip_sock_set_freebind);
614
615void ip_sock_set_recverr(struct sock *sk)
616{
617 inet_set_bit(RECVERR, sk);
618}
619EXPORT_SYMBOL(ip_sock_set_recverr);
620
621int ip_sock_set_mtu_discover(struct sock *sk, int val)
622{
623 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
624 return -EINVAL;
625 WRITE_ONCE(inet_sk(sk)->pmtudisc, val);
626 return 0;
627}
628EXPORT_SYMBOL(ip_sock_set_mtu_discover);
629
630void ip_sock_set_pktinfo(struct sock *sk)
631{
632 inet_set_bit(PKTINFO, sk);
633}
634EXPORT_SYMBOL(ip_sock_set_pktinfo);
635
636/*
637 * Socket option code for IP. This is the end of the line after any
638 * TCP,UDP etc options on an IP socket.
639 */
640static bool setsockopt_needs_rtnl(int optname)
641{
642 switch (optname) {
643 case IP_ADD_MEMBERSHIP:
644 case IP_ADD_SOURCE_MEMBERSHIP:
645 case IP_BLOCK_SOURCE:
646 case IP_DROP_MEMBERSHIP:
647 case IP_DROP_SOURCE_MEMBERSHIP:
648 case IP_MSFILTER:
649 case IP_UNBLOCK_SOURCE:
650 case MCAST_BLOCK_SOURCE:
651 case MCAST_MSFILTER:
652 case MCAST_JOIN_GROUP:
653 case MCAST_JOIN_SOURCE_GROUP:
654 case MCAST_LEAVE_GROUP:
655 case MCAST_LEAVE_SOURCE_GROUP:
656 case MCAST_UNBLOCK_SOURCE:
657 return true;
658 }
659 return false;
660}
661
662static int set_mcast_msfilter(struct sock *sk, int ifindex,
663 int numsrc, int fmode,
664 struct sockaddr_storage *group,
665 struct sockaddr_storage *list)
666{
667 struct ip_msfilter *msf;
668 struct sockaddr_in *psin;
669 int err, i;
670
671 msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
672 if (!msf)
673 return -ENOBUFS;
674
675 psin = (struct sockaddr_in *)group;
676 if (psin->sin_family != AF_INET)
677 goto Eaddrnotavail;
678 msf->imsf_multiaddr = psin->sin_addr.s_addr;
679 msf->imsf_interface = 0;
680 msf->imsf_fmode = fmode;
681 msf->imsf_numsrc = numsrc;
682 for (i = 0; i < numsrc; ++i) {
683 psin = (struct sockaddr_in *)&list[i];
684
685 if (psin->sin_family != AF_INET)
686 goto Eaddrnotavail;
687 msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
688 }
689 err = ip_mc_msfilter(sk, msf, ifindex);
690 kfree(msf);
691 return err;
692
693Eaddrnotavail:
694 kfree(msf);
695 return -EADDRNOTAVAIL;
696}
697
698static int copy_group_source_from_sockptr(struct group_source_req *greqs,
699 sockptr_t optval, int optlen)
700{
701 if (in_compat_syscall()) {
702 struct compat_group_source_req gr32;
703
704 if (optlen != sizeof(gr32))
705 return -EINVAL;
706 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
707 return -EFAULT;
708 greqs->gsr_interface = gr32.gsr_interface;
709 greqs->gsr_group = gr32.gsr_group;
710 greqs->gsr_source = gr32.gsr_source;
711 } else {
712 if (optlen != sizeof(*greqs))
713 return -EINVAL;
714 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
715 return -EFAULT;
716 }
717
718 return 0;
719}
720
721static int do_mcast_group_source(struct sock *sk, int optname,
722 sockptr_t optval, int optlen)
723{
724 struct group_source_req greqs;
725 struct ip_mreq_source mreqs;
726 struct sockaddr_in *psin;
727 int omode, add, err;
728
729 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
730 if (err)
731 return err;
732
733 if (greqs.gsr_group.ss_family != AF_INET ||
734 greqs.gsr_source.ss_family != AF_INET)
735 return -EADDRNOTAVAIL;
736
737 psin = (struct sockaddr_in *)&greqs.gsr_group;
738 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
739 psin = (struct sockaddr_in *)&greqs.gsr_source;
740 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
741 mreqs.imr_interface = 0; /* use index for mc_source */
742
743 if (optname == MCAST_BLOCK_SOURCE) {
744 omode = MCAST_EXCLUDE;
745 add = 1;
746 } else if (optname == MCAST_UNBLOCK_SOURCE) {
747 omode = MCAST_EXCLUDE;
748 add = 0;
749 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
750 struct ip_mreqn mreq;
751
752 psin = (struct sockaddr_in *)&greqs.gsr_group;
753 mreq.imr_multiaddr = psin->sin_addr;
754 mreq.imr_address.s_addr = 0;
755 mreq.imr_ifindex = greqs.gsr_interface;
756 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
757 if (err && err != -EADDRINUSE)
758 return err;
759 greqs.gsr_interface = mreq.imr_ifindex;
760 omode = MCAST_INCLUDE;
761 add = 1;
762 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
763 omode = MCAST_INCLUDE;
764 add = 0;
765 }
766 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
767}
768
769static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
770{
771 struct group_filter *gsf = NULL;
772 int err;
773
774 if (optlen < GROUP_FILTER_SIZE(0))
775 return -EINVAL;
776 if (optlen > READ_ONCE(sock_net(sk)->core.sysctl_optmem_max))
777 return -ENOBUFS;
778
779 gsf = memdup_sockptr(optval, optlen);
780 if (IS_ERR(gsf))
781 return PTR_ERR(gsf);
782
783 /* numsrc >= (4G-140)/128 overflow in 32 bits */
784 err = -ENOBUFS;
785 if (gsf->gf_numsrc >= 0x1ffffff ||
786 gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
787 goto out_free_gsf;
788
789 err = -EINVAL;
790 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
791 goto out_free_gsf;
792
793 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
794 gsf->gf_fmode, &gsf->gf_group,
795 gsf->gf_slist_flex);
796out_free_gsf:
797 kfree(gsf);
798 return err;
799}
800
801static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
802 int optlen)
803{
804 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
805 struct compat_group_filter *gf32;
806 unsigned int n;
807 void *p;
808 int err;
809
810 if (optlen < size0)
811 return -EINVAL;
812 if (optlen > READ_ONCE(sock_net(sk)->core.sysctl_optmem_max) - 4)
813 return -ENOBUFS;
814
815 p = kmalloc(optlen + 4, GFP_KERNEL);
816 if (!p)
817 return -ENOMEM;
818 gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */
819
820 err = -EFAULT;
821 if (copy_from_sockptr(gf32, optval, optlen))
822 goto out_free_gsf;
823
824 /* numsrc >= (4G-140)/128 overflow in 32 bits */
825 n = gf32->gf_numsrc;
826 err = -ENOBUFS;
827 if (n >= 0x1ffffff)
828 goto out_free_gsf;
829
830 err = -EINVAL;
831 if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
832 goto out_free_gsf;
833
834 /* numsrc >= (4G-140)/128 overflow in 32 bits */
835 err = -ENOBUFS;
836 if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
837 goto out_free_gsf;
838 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
839 &gf32->gf_group, gf32->gf_slist_flex);
840out_free_gsf:
841 kfree(p);
842 return err;
843}
844
845static int ip_mcast_join_leave(struct sock *sk, int optname,
846 sockptr_t optval, int optlen)
847{
848 struct ip_mreqn mreq = { };
849 struct sockaddr_in *psin;
850 struct group_req greq;
851
852 if (optlen < sizeof(struct group_req))
853 return -EINVAL;
854 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
855 return -EFAULT;
856
857 psin = (struct sockaddr_in *)&greq.gr_group;
858 if (psin->sin_family != AF_INET)
859 return -EINVAL;
860 mreq.imr_multiaddr = psin->sin_addr;
861 mreq.imr_ifindex = greq.gr_interface;
862 if (optname == MCAST_JOIN_GROUP)
863 return ip_mc_join_group(sk, &mreq);
864 return ip_mc_leave_group(sk, &mreq);
865}
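
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * protocol-independent join handled above takes a struct group_req.
 * The interface name and group address below are placeholders.
 *
 *	struct group_req gr = { .gr_interface = if_nametoindex("eth0") };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&gr.gr_group;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("239.1.2.3");
 *	setsockopt(fd, SOL_IP, MCAST_JOIN_GROUP, &gr, sizeof(gr));
 */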
866
867static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
868 sockptr_t optval, int optlen)
869{
870 struct compat_group_req greq;
871 struct ip_mreqn mreq = { };
872 struct sockaddr_in *psin;
873
874 if (optlen < sizeof(struct compat_group_req))
875 return -EINVAL;
876 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
877 return -EFAULT;
878
879 psin = (struct sockaddr_in *)&greq.gr_group;
880 if (psin->sin_family != AF_INET)
881 return -EINVAL;
882 mreq.imr_multiaddr = psin->sin_addr;
883 mreq.imr_ifindex = greq.gr_interface;
884
885 if (optname == MCAST_JOIN_GROUP)
886 return ip_mc_join_group(sk, &mreq);
887 return ip_mc_leave_group(sk, &mreq);
888}
889
890DEFINE_STATIC_KEY_FALSE(ip4_min_ttl);
891
892int do_ip_setsockopt(struct sock *sk, int level, int optname,
893 sockptr_t optval, unsigned int optlen)
894{
895 struct inet_sock *inet = inet_sk(sk);
896 struct net *net = sock_net(sk);
897 int val = 0, err, retv;
898 bool needs_rtnl = setsockopt_needs_rtnl(optname);
899
900 switch (optname) {
901 case IP_PKTINFO:
902 case IP_RECVTTL:
903 case IP_RECVOPTS:
904 case IP_RECVTOS:
905 case IP_RETOPTS:
906 case IP_TOS:
907 case IP_TTL:
908 case IP_HDRINCL:
909 case IP_MTU_DISCOVER:
910 case IP_RECVERR:
911 case IP_ROUTER_ALERT:
912 case IP_FREEBIND:
913 case IP_PASSSEC:
914 case IP_TRANSPARENT:
915 case IP_MINTTL:
916 case IP_NODEFRAG:
917 case IP_BIND_ADDRESS_NO_PORT:
918 case IP_UNICAST_IF:
919 case IP_MULTICAST_TTL:
920 case IP_MULTICAST_ALL:
921 case IP_MULTICAST_LOOP:
922 case IP_RECVORIGDSTADDR:
923 case IP_CHECKSUM:
924 case IP_RECVFRAGSIZE:
925 case IP_RECVERR_RFC4884:
926 case IP_LOCAL_PORT_RANGE:
927 if (optlen >= sizeof(int)) {
928 if (copy_from_sockptr(&val, optval, sizeof(val)))
929 return -EFAULT;
930 } else if (optlen >= sizeof(char)) {
931 unsigned char ucval;
932
933 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
934 return -EFAULT;
935 val = (int) ucval;
936 }
937 }
938
939 /* If optlen==0, it is equivalent to val == 0 */
940
941 if (optname == IP_ROUTER_ALERT) {
942 retv = ip_ra_control(sk, val ? 1 : 0, NULL);
943 if (retv == 0)
944 inet_assign_bit(RTALERT, sk, val);
945 return retv;
946 }
947 if (ip_mroute_opt(optname))
948 return ip_mroute_setsockopt(sk, optname, optval, optlen);
949
950 /* Handle options that can be set without locking the socket. */
951 switch (optname) {
952 case IP_PKTINFO:
953 inet_assign_bit(PKTINFO, sk, val);
954 return 0;
955 case IP_RECVTTL:
956 inet_assign_bit(TTL, sk, val);
957 return 0;
958 case IP_RECVTOS:
959 inet_assign_bit(TOS, sk, val);
960 return 0;
961 case IP_RECVOPTS:
962 inet_assign_bit(RECVOPTS, sk, val);
963 return 0;
964 case IP_RETOPTS:
965 inet_assign_bit(RETOPTS, sk, val);
966 return 0;
967 case IP_PASSSEC:
968 inet_assign_bit(PASSSEC, sk, val);
969 return 0;
970 case IP_RECVORIGDSTADDR:
971 inet_assign_bit(ORIGDSTADDR, sk, val);
972 return 0;
973 case IP_RECVFRAGSIZE:
974 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
975 return -EINVAL;
976 inet_assign_bit(RECVFRAGSIZE, sk, val);
977 return 0;
978 case IP_RECVERR:
979 inet_assign_bit(RECVERR, sk, val);
980 if (!val)
981 skb_errqueue_purge(&sk->sk_error_queue);
982 return 0;
983 case IP_RECVERR_RFC4884:
984 if (val < 0 || val > 1)
985 return -EINVAL;
986 inet_assign_bit(RECVERR_RFC4884, sk, val);
987 return 0;
988 case IP_FREEBIND:
989 if (optlen < 1)
990 return -EINVAL;
991 inet_assign_bit(FREEBIND, sk, val);
992 return 0;
993 case IP_HDRINCL:
994 if (sk->sk_type != SOCK_RAW)
995 return -ENOPROTOOPT;
996 inet_assign_bit(HDRINCL, sk, val);
997 return 0;
998 case IP_MULTICAST_LOOP:
999 if (optlen < 1)
1000 return -EINVAL;
1001 inet_assign_bit(MC_LOOP, sk, val);
1002 return 0;
1003 case IP_MULTICAST_ALL:
1004 if (optlen < 1)
1005 return -EINVAL;
1006 if (val != 0 && val != 1)
1007 return -EINVAL;
1008 inet_assign_bit(MC_ALL, sk, val);
1009 return 0;
1010 case IP_TRANSPARENT:
1011 if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1012 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1013 return -EPERM;
1014 if (optlen < 1)
1015 return -EINVAL;
1016 inet_assign_bit(TRANSPARENT, sk, val);
1017 return 0;
1018 case IP_NODEFRAG:
1019 if (sk->sk_type != SOCK_RAW)
1020 return -ENOPROTOOPT;
1021 inet_assign_bit(NODEFRAG, sk, val);
1022 return 0;
1023 case IP_BIND_ADDRESS_NO_PORT:
1024 inet_assign_bit(BIND_ADDRESS_NO_PORT, sk, val);
1025 return 0;
1026 case IP_TTL:
1027 if (optlen < 1)
1028 return -EINVAL;
1029 if (val != -1 && (val < 1 || val > 255))
1030 return -EINVAL;
1031 WRITE_ONCE(inet->uc_ttl, val);
1032 return 0;
1033 case IP_MINTTL:
1034 if (optlen < 1)
1035 return -EINVAL;
1036 if (val < 0 || val > 255)
1037 return -EINVAL;
1038
1039 if (val)
1040 static_branch_enable(&ip4_min_ttl);
1041
1042 WRITE_ONCE(inet->min_ttl, val);
1043 return 0;
1044 case IP_MULTICAST_TTL:
1045 if (sk->sk_type == SOCK_STREAM)
1046 return -EINVAL;
1047 if (optlen < 1)
1048 return -EINVAL;
1049 if (val == -1)
1050 val = 1;
1051 if (val < 0 || val > 255)
1052 return -EINVAL;
1053 WRITE_ONCE(inet->mc_ttl, val);
1054 return 0;
1055 case IP_MTU_DISCOVER:
1056 return ip_sock_set_mtu_discover(sk, val);
1057 case IP_TOS: /* This sets both TOS and Precedence */
1058 ip_sock_set_tos(sk, val);
1059 return 0;
1060 case IP_LOCAL_PORT_RANGE:
1061 {
1062 u16 lo = val;
1063 u16 hi = val >> 16;
1064
1065 if (optlen != sizeof(u32))
1066 return -EINVAL;
1067 if (lo != 0 && hi != 0 && lo > hi)
1068 return -EINVAL;
1069
1070 WRITE_ONCE(inet->local_port_range, val);
1071 return 0;
1072 }
1073 }
1074
1075 err = 0;
1076 if (needs_rtnl)
1077 rtnl_lock();
1078 sockopt_lock_sock(sk);
1079
1080 switch (optname) {
1081 case IP_OPTIONS:
1082 {
1083 struct ip_options_rcu *old, *opt = NULL;
1084
1085 if (optlen > 40)
1086 goto e_inval;
1087 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
1088 if (err)
1089 break;
1090 old = rcu_dereference_protected(inet->inet_opt,
1091 lockdep_sock_is_held(sk));
1092 if (inet_test_bit(IS_ICSK, sk)) {
1093 struct inet_connection_sock *icsk = inet_csk(sk);
1094#if IS_ENABLED(CONFIG_IPV6)
1095 if (sk->sk_family == PF_INET ||
1096 (!((1 << sk->sk_state) &
1097 (TCPF_LISTEN | TCPF_CLOSE)) &&
1098 inet->inet_daddr != LOOPBACK4_IPV6)) {
1099#endif
1100 if (old)
1101 icsk->icsk_ext_hdr_len -= old->opt.optlen;
1102 if (opt)
1103 icsk->icsk_ext_hdr_len += opt->opt.optlen;
1104 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
1105#if IS_ENABLED(CONFIG_IPV6)
1106 }
1107#endif
1108 }
1109 rcu_assign_pointer(inet->inet_opt, opt);
1110 if (old)
1111 kfree_rcu(old, rcu);
1112 break;
1113 }
1114 case IP_CHECKSUM:
1115 if (val) {
1116 if (!(inet_test_bit(CHECKSUM, sk))) {
1117 inet_inc_convert_csum(sk);
1118 inet_set_bit(CHECKSUM, sk);
1119 }
1120 } else {
1121 if (inet_test_bit(CHECKSUM, sk)) {
1122 inet_dec_convert_csum(sk);
1123 inet_clear_bit(CHECKSUM, sk);
1124 }
1125 }
1126 break;
1127 case IP_UNICAST_IF:
1128 {
1129 struct net_device *dev = NULL;
1130 int ifindex;
1131 int midx;
1132
1133 if (optlen != sizeof(int))
1134 goto e_inval;
1135
1136 ifindex = (__force int)ntohl((__force __be32)val);
1137 if (ifindex == 0) {
1138 WRITE_ONCE(inet->uc_index, 0);
1139 err = 0;
1140 break;
1141 }
1142
1143 dev = dev_get_by_index(sock_net(sk), ifindex);
1144 err = -EADDRNOTAVAIL;
1145 if (!dev)
1146 break;
1147
1148 midx = l3mdev_master_ifindex(dev);
1149 dev_put(dev);
1150
1151 err = -EINVAL;
1152 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1153 break;
1154
1155 WRITE_ONCE(inet->uc_index, ifindex);
1156 err = 0;
1157 break;
1158 }
1159 case IP_MULTICAST_IF:
1160 {
1161 struct ip_mreqn mreq;
1162 struct net_device *dev = NULL;
1163 int midx;
1164
1165 if (sk->sk_type == SOCK_STREAM)
1166 goto e_inval;
1167 /*
1168		 *	Check that the arguments are allowable
1169 */
1170
1171 if (optlen < sizeof(struct in_addr))
1172 goto e_inval;
1173
1174 err = -EFAULT;
1175 if (optlen >= sizeof(struct ip_mreqn)) {
1176 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1177 break;
1178 } else {
1179 memset(&mreq, 0, sizeof(mreq));
1180 if (optlen >= sizeof(struct ip_mreq)) {
1181 if (copy_from_sockptr(&mreq, optval,
1182 sizeof(struct ip_mreq)))
1183 break;
1184 } else if (optlen >= sizeof(struct in_addr)) {
1185 if (copy_from_sockptr(&mreq.imr_address, optval,
1186 sizeof(struct in_addr)))
1187 break;
1188 }
1189 }
1190
1191 if (!mreq.imr_ifindex) {
1192 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1193 WRITE_ONCE(inet->mc_index, 0);
1194 WRITE_ONCE(inet->mc_addr, 0);
1195 err = 0;
1196 break;
1197 }
1198 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1199 if (dev)
1200 mreq.imr_ifindex = dev->ifindex;
1201 } else
1202 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1203
1204
1205 err = -EADDRNOTAVAIL;
1206 if (!dev)
1207 break;
1208
1209 midx = l3mdev_master_ifindex(dev);
1210
1211 dev_put(dev);
1212
1213 err = -EINVAL;
1214 if (sk->sk_bound_dev_if &&
1215 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1216 midx != sk->sk_bound_dev_if)
1217 break;
1218
1219 WRITE_ONCE(inet->mc_index, mreq.imr_ifindex);
1220 WRITE_ONCE(inet->mc_addr, mreq.imr_address.s_addr);
1221 err = 0;
1222 break;
1223 }
1224
1225 case IP_ADD_MEMBERSHIP:
1226 case IP_DROP_MEMBERSHIP:
1227 {
1228 struct ip_mreqn mreq;
1229
1230 err = -EPROTO;
1231 if (inet_test_bit(IS_ICSK, sk))
1232 break;
1233
1234 if (optlen < sizeof(struct ip_mreq))
1235 goto e_inval;
1236 err = -EFAULT;
1237 if (optlen >= sizeof(struct ip_mreqn)) {
1238 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1239 break;
1240 } else {
1241 memset(&mreq, 0, sizeof(mreq));
1242 if (copy_from_sockptr(&mreq, optval,
1243 sizeof(struct ip_mreq)))
1244 break;
1245 }
1246
1247 if (optname == IP_ADD_MEMBERSHIP)
1248 err = ip_mc_join_group(sk, &mreq);
1249 else
1250 err = ip_mc_leave_group(sk, &mreq);
1251 break;
1252 }
1253 case IP_MSFILTER:
1254 {
1255 struct ip_msfilter *msf;
1256
1257 if (optlen < IP_MSFILTER_SIZE(0))
1258 goto e_inval;
1259 if (optlen > READ_ONCE(net->core.sysctl_optmem_max)) {
1260 err = -ENOBUFS;
1261 break;
1262 }
1263 msf = memdup_sockptr(optval, optlen);
1264 if (IS_ERR(msf)) {
1265 err = PTR_ERR(msf);
1266 break;
1267 }
1268 /* numsrc >= (1G-4) overflow in 32 bits */
1269 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1270 msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
1271 kfree(msf);
1272 err = -ENOBUFS;
1273 break;
1274 }
1275 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1276 kfree(msf);
1277 err = -EINVAL;
1278 break;
1279 }
1280 err = ip_mc_msfilter(sk, msf, 0);
1281 kfree(msf);
1282 break;
1283 }
1284 case IP_BLOCK_SOURCE:
1285 case IP_UNBLOCK_SOURCE:
1286 case IP_ADD_SOURCE_MEMBERSHIP:
1287 case IP_DROP_SOURCE_MEMBERSHIP:
1288 {
1289 struct ip_mreq_source mreqs;
1290 int omode, add;
1291
1292 if (optlen != sizeof(struct ip_mreq_source))
1293 goto e_inval;
1294 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1295 err = -EFAULT;
1296 break;
1297 }
1298 if (optname == IP_BLOCK_SOURCE) {
1299 omode = MCAST_EXCLUDE;
1300 add = 1;
1301 } else if (optname == IP_UNBLOCK_SOURCE) {
1302 omode = MCAST_EXCLUDE;
1303 add = 0;
1304 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1305 struct ip_mreqn mreq;
1306
1307 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1308 mreq.imr_address.s_addr = mreqs.imr_interface;
1309 mreq.imr_ifindex = 0;
1310 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1311 if (err && err != -EADDRINUSE)
1312 break;
1313 omode = MCAST_INCLUDE;
1314 add = 1;
1315 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1316 omode = MCAST_INCLUDE;
1317 add = 0;
1318 }
1319 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1320 break;
1321 }
1322 case MCAST_JOIN_GROUP:
1323 case MCAST_LEAVE_GROUP:
1324 if (in_compat_syscall())
1325 err = compat_ip_mcast_join_leave(sk, optname, optval,
1326 optlen);
1327 else
1328 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1329 break;
1330 case MCAST_JOIN_SOURCE_GROUP:
1331 case MCAST_LEAVE_SOURCE_GROUP:
1332 case MCAST_BLOCK_SOURCE:
1333 case MCAST_UNBLOCK_SOURCE:
1334 err = do_mcast_group_source(sk, optname, optval, optlen);
1335 break;
1336 case MCAST_MSFILTER:
1337 if (in_compat_syscall())
1338 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1339 else
1340 err = ip_set_mcast_msfilter(sk, optval, optlen);
1341 break;
1342 case IP_IPSEC_POLICY:
1343 case IP_XFRM_POLICY:
1344 err = -EPERM;
1345 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1346 break;
1347 err = xfrm_user_policy(sk, optname, optval, optlen);
1348 break;
1349
1350 default:
1351 err = -ENOPROTOOPT;
1352 break;
1353 }
1354 sockopt_release_sock(sk);
1355 if (needs_rtnl)
1356 rtnl_unlock();
1357 return err;
1358
1359e_inval:
1360 sockopt_release_sock(sk);
1361 if (needs_rtnl)
1362 rtnl_unlock();
1363 return -EINVAL;
1364}
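
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * IP_LOCAL_PORT_RANGE value handled in do_ip_setsockopt() packs the
 * lower bound into the low 16 bits and the upper bound into the high
 * 16 bits of a u32; the bounds below are arbitrary examples.
 *
 *	uint32_t range = 40000 | (40100u << 16);
 *
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 */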
1365
1366/**
1367 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1368 * @sk: socket
1369 * @skb: buffer
1370 * @drop_dst: if true, drops skb dst
1371 *
1372 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1373 * destination in skb->cb[] before the dst is dropped.
1374 * This way, the receiver doesn't take cache line misses to read the rtable.
1375 */
1376void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
1377{
1378 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1379 bool prepare = inet_test_bit(PKTINFO, sk) ||
1380 ipv6_sk_rxinfo(sk);
1381
1382 if (prepare && skb_rtable(skb)) {
1383 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1384 * which has interface index (iif) as the first member of the
1385 * underlying inet{6}_skb_parm struct. This code then overlays
1386 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1387 * element so the iif is picked up from the prior IPCB. If iif
1388 * is the loopback interface, then return the sending interface
1389 * (e.g., process binds socket to eth0 for Tx which is
1390 * redirected to loopback in the rtable/dst).
1391 */
1392 struct rtable *rt = skb_rtable(skb);
1393 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1394
1395 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1396 pktinfo->ipi_ifindex = inet_iif(skb);
1397 else if (l3slave && rt && rt->rt_iif)
1398 pktinfo->ipi_ifindex = rt->rt_iif;
1399
1400 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1401 } else {
1402 pktinfo->ipi_ifindex = 0;
1403 pktinfo->ipi_spec_dst.s_addr = 0;
1404 }
1405 if (drop_dst)
1406 skb_dst_drop(skb);
1407}
1408
1409int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1410 unsigned int optlen)
1411{
1412 int err;
1413
1414 if (level != SOL_IP)
1415 return -ENOPROTOOPT;
1416
1417 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1418#ifdef CONFIG_NETFILTER
1419 /* we need to exclude all possible ENOPROTOOPTs except default case */
1420 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1421 optname != IP_IPSEC_POLICY &&
1422 optname != IP_XFRM_POLICY &&
1423 !ip_mroute_opt(optname))
1424 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1425#endif
1426 return err;
1427}
1428EXPORT_SYMBOL(ip_setsockopt);
1429
1430/*
1431 *	Get the options. Note for future reference: the GET of IP options returns
1432 *	the _received_ ones, while the SET sets the _sent_ ones.
1433 */
1434
1435static bool getsockopt_needs_rtnl(int optname)
1436{
1437 switch (optname) {
1438 case IP_MSFILTER:
1439 case MCAST_MSFILTER:
1440 return true;
1441 }
1442 return false;
1443}
1444
1445static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1446 sockptr_t optlen, int len)
1447{
1448 const int size0 = offsetof(struct group_filter, gf_slist_flex);
1449 struct group_filter gsf;
1450 int num, gsf_size;
1451 int err;
1452
1453 if (len < size0)
1454 return -EINVAL;
1455 if (copy_from_sockptr(&gsf, optval, size0))
1456 return -EFAULT;
1457
1458 num = gsf.gf_numsrc;
1459 err = ip_mc_gsfget(sk, &gsf, optval,
1460 offsetof(struct group_filter, gf_slist_flex));
1461 if (err)
1462 return err;
1463 if (gsf.gf_numsrc < num)
1464 num = gsf.gf_numsrc;
1465 gsf_size = GROUP_FILTER_SIZE(num);
1466 if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) ||
1467 copy_to_sockptr(optval, &gsf, size0))
1468 return -EFAULT;
1469 return 0;
1470}
1471
1472static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1473 sockptr_t optlen, int len)
1474{
1475 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
1476 struct compat_group_filter gf32;
1477 struct group_filter gf;
1478 int num;
1479 int err;
1480
1481 if (len < size0)
1482 return -EINVAL;
1483 if (copy_from_sockptr(&gf32, optval, size0))
1484 return -EFAULT;
1485
1486 gf.gf_interface = gf32.gf_interface;
1487 gf.gf_fmode = gf32.gf_fmode;
1488 num = gf.gf_numsrc = gf32.gf_numsrc;
1489 gf.gf_group = gf32.gf_group;
1490
1491 err = ip_mc_gsfget(sk, &gf, optval,
1492 offsetof(struct compat_group_filter, gf_slist_flex));
1493 if (err)
1494 return err;
1495 if (gf.gf_numsrc < num)
1496 num = gf.gf_numsrc;
1497 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1498 if (copy_to_sockptr(optlen, &len, sizeof(int)) ||
1499 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode),
1500 &gf.gf_fmode, sizeof(gf.gf_fmode)) ||
1501 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc),
1502 &gf.gf_numsrc, sizeof(gf.gf_numsrc)))
1503 return -EFAULT;
1504 return 0;
1505}
1506
1507int do_ip_getsockopt(struct sock *sk, int level, int optname,
1508 sockptr_t optval, sockptr_t optlen)
1509{
1510 struct inet_sock *inet = inet_sk(sk);
1511 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1512 int val, err = 0;
1513 int len;
1514
1515 if (level != SOL_IP)
1516 return -EOPNOTSUPP;
1517
1518 if (ip_mroute_opt(optname))
1519 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1520
1521 if (copy_from_sockptr(&len, optlen, sizeof(int)))
1522 return -EFAULT;
1523 if (len < 0)
1524 return -EINVAL;
1525
1526 /* Handle options that can be read without locking the socket. */
1527 switch (optname) {
1528 case IP_PKTINFO:
1529 val = inet_test_bit(PKTINFO, sk);
1530 goto copyval;
1531 case IP_RECVTTL:
1532 val = inet_test_bit(TTL, sk);
1533 goto copyval;
1534 case IP_RECVTOS:
1535 val = inet_test_bit(TOS, sk);
1536 goto copyval;
1537 case IP_RECVOPTS:
1538 val = inet_test_bit(RECVOPTS, sk);
1539 goto copyval;
1540 case IP_RETOPTS:
1541 val = inet_test_bit(RETOPTS, sk);
1542 goto copyval;
1543 case IP_PASSSEC:
1544 val = inet_test_bit(PASSSEC, sk);
1545 goto copyval;
1546 case IP_RECVORIGDSTADDR:
1547 val = inet_test_bit(ORIGDSTADDR, sk);
1548 goto copyval;
1549 case IP_CHECKSUM:
1550 val = inet_test_bit(CHECKSUM, sk);
1551 goto copyval;
1552 case IP_RECVFRAGSIZE:
1553 val = inet_test_bit(RECVFRAGSIZE, sk);
1554 goto copyval;
1555 case IP_RECVERR:
1556 val = inet_test_bit(RECVERR, sk);
1557 goto copyval;
1558 case IP_RECVERR_RFC4884:
1559 val = inet_test_bit(RECVERR_RFC4884, sk);
1560 goto copyval;
1561 case IP_FREEBIND:
1562 val = inet_test_bit(FREEBIND, sk);
1563 goto copyval;
1564 case IP_HDRINCL:
1565 val = inet_test_bit(HDRINCL, sk);
1566 goto copyval;
1567 case IP_MULTICAST_LOOP:
1568 val = inet_test_bit(MC_LOOP, sk);
1569 goto copyval;
1570 case IP_MULTICAST_ALL:
1571 val = inet_test_bit(MC_ALL, sk);
1572 goto copyval;
1573 case IP_TRANSPARENT:
1574 val = inet_test_bit(TRANSPARENT, sk);
1575 goto copyval;
1576 case IP_NODEFRAG:
1577 val = inet_test_bit(NODEFRAG, sk);
1578 goto copyval;
1579 case IP_BIND_ADDRESS_NO_PORT:
1580 val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
1581 goto copyval;
1582 case IP_ROUTER_ALERT:
1583 val = inet_test_bit(RTALERT, sk);
1584 goto copyval;
1585 case IP_TTL:
1586 val = READ_ONCE(inet->uc_ttl);
1587 if (val < 0)
1588 val = READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_default_ttl);
1589 goto copyval;
1590 case IP_MINTTL:
1591 val = READ_ONCE(inet->min_ttl);
1592 goto copyval;
1593 case IP_MULTICAST_TTL:
1594 val = READ_ONCE(inet->mc_ttl);
1595 goto copyval;
1596 case IP_MTU_DISCOVER:
1597 val = READ_ONCE(inet->pmtudisc);
1598 goto copyval;
1599 case IP_TOS:
1600 val = READ_ONCE(inet->tos);
1601 goto copyval;
1602 case IP_OPTIONS:
1603 {
1604 unsigned char optbuf[sizeof(struct ip_options)+40];
1605 struct ip_options *opt = (struct ip_options *)optbuf;
1606 struct ip_options_rcu *inet_opt;
1607
1608 rcu_read_lock();
1609 inet_opt = rcu_dereference(inet->inet_opt);
1610 opt->optlen = 0;
1611 if (inet_opt)
1612 memcpy(optbuf, &inet_opt->opt,
1613 sizeof(struct ip_options) +
1614 inet_opt->opt.optlen);
1615 rcu_read_unlock();
1616
1617 if (opt->optlen == 0) {
1618 len = 0;
1619 return copy_to_sockptr(optlen, &len, sizeof(int));
1620 }
1621
1622 ip_options_undo(opt);
1623
1624 len = min_t(unsigned int, len, opt->optlen);
1625 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1626 return -EFAULT;
1627 if (copy_to_sockptr(optval, opt->__data, len))
1628 return -EFAULT;
1629 return 0;
1630 }
1631 case IP_MTU:
1632 {
1633 struct dst_entry *dst;
1634 val = 0;
1635 dst = sk_dst_get(sk);
1636 if (dst) {
1637 val = dst_mtu(dst);
1638 dst_release(dst);
1639 }
1640 if (!val)
1641 return -ENOTCONN;
1642 goto copyval;
1643 }
1644 case IP_PKTOPTIONS:
1645 {
1646 struct msghdr msg;
1647
1648 if (sk->sk_type != SOCK_STREAM)
1649 return -ENOPROTOOPT;
1650
1651 if (optval.is_kernel) {
1652 msg.msg_control_is_user = false;
1653 msg.msg_control = optval.kernel;
1654 } else {
1655 msg.msg_control_is_user = true;
1656 msg.msg_control_user = optval.user;
1657 }
1658 msg.msg_controllen = len;
1659 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1660
1661 if (inet_test_bit(PKTINFO, sk)) {
1662 struct in_pktinfo info;
1663
1664 info.ipi_addr.s_addr = READ_ONCE(inet->inet_rcv_saddr);
1665 info.ipi_spec_dst.s_addr = READ_ONCE(inet->inet_rcv_saddr);
1666 info.ipi_ifindex = READ_ONCE(inet->mc_index);
1667 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1668 }
1669 if (inet_test_bit(TTL, sk)) {
1670 int hlim = READ_ONCE(inet->mc_ttl);
1671
1672 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1673 }
1674 if (inet_test_bit(TOS, sk)) {
1675 int tos = READ_ONCE(inet->rcv_tos);
1676 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1677 }
1678 len -= msg.msg_controllen;
1679 return copy_to_sockptr(optlen, &len, sizeof(int));
1680 }
1681 case IP_UNICAST_IF:
1682 val = (__force int)htonl((__u32) READ_ONCE(inet->uc_index));
1683 goto copyval;
1684 case IP_MULTICAST_IF:
1685 {
1686 struct in_addr addr;
1687 len = min_t(unsigned int, len, sizeof(struct in_addr));
1688 addr.s_addr = READ_ONCE(inet->mc_addr);
1689
1690 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1691 return -EFAULT;
1692 if (copy_to_sockptr(optval, &addr, len))
1693 return -EFAULT;
1694 return 0;
1695 }
1696 case IP_LOCAL_PORT_RANGE:
1697 val = READ_ONCE(inet->local_port_range);
1698 goto copyval;
1699 }
1700
1701 if (needs_rtnl)
1702 rtnl_lock();
1703 sockopt_lock_sock(sk);
1704
1705 switch (optname) {
1706 case IP_MSFILTER:
1707 {
1708 struct ip_msfilter msf;
1709
1710 if (len < IP_MSFILTER_SIZE(0)) {
1711 err = -EINVAL;
1712 goto out;
1713 }
1714 if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) {
1715 err = -EFAULT;
1716 goto out;
1717 }
1718 err = ip_mc_msfget(sk, &msf, optval, optlen);
1719 goto out;
1720 }
1721 case MCAST_MSFILTER:
1722 if (in_compat_syscall())
1723 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1724 len);
1725 else
1726 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1727 goto out;
1728 case IP_PROTOCOL:
1729 val = inet_sk(sk)->inet_num;
1730 break;
1731 default:
1732 sockopt_release_sock(sk);
1733 return -ENOPROTOOPT;
1734 }
1735 sockopt_release_sock(sk);
1736copyval:
1737 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1738 unsigned char ucval = (unsigned char)val;
1739 len = 1;
1740 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1741 return -EFAULT;
1742 if (copy_to_sockptr(optval, &ucval, 1))
1743 return -EFAULT;
1744 } else {
1745 len = min_t(unsigned int, sizeof(int), len);
1746 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1747 return -EFAULT;
1748 if (copy_to_sockptr(optval, &val, len))
1749 return -EFAULT;
1750 }
1751 return 0;
1752
1753out:
1754 sockopt_release_sock(sk);
1755 if (needs_rtnl)
1756 rtnl_unlock();
1757 return err;
1758}
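
/*
 * Illustrative userspace sketch (not part of the kernel build): reading
 * the cached path MTU through the IP_MTU case handled above.  "dst" is
 * assumed to be a prepared struct sockaddr_in; getsockopt() fails with
 * ENOTCONN while no destination is cached.
 *
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	if (getsockopt(fd, SOL_IP, IP_MTU, &mtu, &len) == 0)
 *		(mtu now holds the path MTU toward dst)
 */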
1759
1760int ip_getsockopt(struct sock *sk, int level,
1761 int optname, char __user *optval, int __user *optlen)
1762{
1763 int err;
1764
1765 err = do_ip_getsockopt(sk, level, optname,
1766 USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
1767
1768#ifdef CONFIG_NETFILTER
1769 /* we need to exclude all possible ENOPROTOOPTs except default case */
1770 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1771 !ip_mroute_opt(optname)) {
1772 int len;
1773
1774 if (get_user(len, optlen))
1775 return -EFAULT;
1776
1777 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1778 if (err >= 0)
1779 err = put_user(len, optlen);
1780 return err;
1781 }
1782#endif
1783 return err;
1784}
1785EXPORT_SYMBOL(ip_getsockopt);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The IP to API glue.
8 *
9 * Authors: see ip.c
10 *
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/mm.h>
22#include <linux/skbuff.h>
23#include <linux/ip.h>
24#include <linux/icmp.h>
25#include <linux/inetdevice.h>
26#include <linux/netdevice.h>
27#include <linux/slab.h>
28#include <net/sock.h>
29#include <net/ip.h>
30#include <net/icmp.h>
31#include <net/tcp_states.h>
32#include <linux/udp.h>
33#include <linux/igmp.h>
34#include <linux/netfilter.h>
35#include <linux/route.h>
36#include <linux/mroute.h>
37#include <net/inet_ecn.h>
38#include <net/route.h>
39#include <net/xfrm.h>
40#include <net/compat.h>
41#include <net/checksum.h>
42#if IS_ENABLED(CONFIG_IPV6)
43#include <net/transp_v6.h>
44#endif
45#include <net/ip_fib.h>
46
47#include <linux/errqueue.h>
48#include <linux/uaccess.h>
49
50/*
51 * SOL_IP control messages.
52 */
53
54static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
55{
56 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
57
58 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
59
60 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
61}
62
63static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
64{
65 int ttl = ip_hdr(skb)->ttl;
66 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
67}
68
69static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
70{
71 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
72}
73
74static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
75{
76 if (IPCB(skb)->opt.optlen == 0)
77 return;
78
79 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
80 ip_hdr(skb) + 1);
81}
82
83
84static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
85 struct sk_buff *skb)
86{
87 unsigned char optbuf[sizeof(struct ip_options) + 40];
88 struct ip_options *opt = (struct ip_options *)optbuf;
89
90 if (IPCB(skb)->opt.optlen == 0)
91 return;
92
93 if (ip_options_echo(net, opt, skb)) {
94 msg->msg_flags |= MSG_CTRUNC;
95 return;
96 }
97 ip_options_undo(opt);
98
99 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
100}
101
102static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
103{
104 int val;
105
106 if (IPCB(skb)->frag_max_size == 0)
107 return;
108
109 val = IPCB(skb)->frag_max_size;
110 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
111}
112
113static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
114 int tlen, int offset)
115{
116 __wsum csum = skb->csum;
117
118 if (skb->ip_summed != CHECKSUM_COMPLETE)
119 return;
120
121 if (offset != 0) {
122 int tend_off = skb_transport_offset(skb) + tlen;
123 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
124 }
125
126 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
127}
128
129static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
130{
131 char *secdata;
132 u32 seclen, secid;
133 int err;
134
135 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
136 if (err)
137 return;
138
139 err = security_secid_to_secctx(secid, &secdata, &seclen);
140 if (err)
141 return;
142
143 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
144 security_release_secctx(secdata, seclen);
145}
146
147static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
148{
149 __be16 _ports[2], *ports;
150 struct sockaddr_in sin;
151
152 /* All current transport protocols have the port numbers in the
153 * first four bytes of the transport header and this function is
154 * written with this assumption in mind.
155 */
156 ports = skb_header_pointer(skb, skb_transport_offset(skb),
157 sizeof(_ports), &_ports);
158 if (!ports)
159 return;
160
161 sin.sin_family = AF_INET;
162 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
163 sin.sin_port = ports[1];
164 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
165
166 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
167}
168
169void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
170 struct sk_buff *skb, int tlen, int offset)
171{
172 unsigned long flags = inet_cmsg_flags(inet_sk(sk));
173
174 if (!flags)
175 return;
176
177 /* Ordered by supposed usage frequency */
178 if (flags & IP_CMSG_PKTINFO) {
179 ip_cmsg_recv_pktinfo(msg, skb);
180
181 flags &= ~IP_CMSG_PKTINFO;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_TTL) {
187 ip_cmsg_recv_ttl(msg, skb);
188
189 flags &= ~IP_CMSG_TTL;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_TOS) {
195 ip_cmsg_recv_tos(msg, skb);
196
197 flags &= ~IP_CMSG_TOS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_RECVOPTS) {
203 ip_cmsg_recv_opts(msg, skb);
204
205 flags &= ~IP_CMSG_RECVOPTS;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_RETOPTS) {
211 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
212
213 flags &= ~IP_CMSG_RETOPTS;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_PASSSEC) {
219 ip_cmsg_recv_security(msg, skb);
220
221 flags &= ~IP_CMSG_PASSSEC;
222 if (!flags)
223 return;
224 }
225
226 if (flags & IP_CMSG_ORIGDSTADDR) {
227 ip_cmsg_recv_dstaddr(msg, skb);
228
229 flags &= ~IP_CMSG_ORIGDSTADDR;
230 if (!flags)
231 return;
232 }
233
234 if (flags & IP_CMSG_CHECKSUM)
235 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
236
237 if (flags & IP_CMSG_RECVFRAGSIZE)
238 ip_cmsg_recv_fragsize(msg, skb);
239}
240EXPORT_SYMBOL(ip_cmsg_recv_offset);
241
242int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
243 bool allow_ipv6)
244{
245 int err, val;
246 struct cmsghdr *cmsg;
247 struct net *net = sock_net(sk);
248
249 for_each_cmsghdr(cmsg, msg) {
250 if (!CMSG_OK(msg, cmsg))
251 return -EINVAL;
252#if IS_ENABLED(CONFIG_IPV6)
253 if (allow_ipv6 &&
254 cmsg->cmsg_level == SOL_IPV6 &&
255 cmsg->cmsg_type == IPV6_PKTINFO) {
256 struct in6_pktinfo *src_info;
257
258 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
259 return -EINVAL;
260 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
261 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
262 return -EINVAL;
263 if (src_info->ipi6_ifindex)
264 ipc->oif = src_info->ipi6_ifindex;
265 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
266 continue;
267 }
268#endif
269 if (cmsg->cmsg_level == SOL_SOCKET) {
270 err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
271 if (err)
272 return err;
273 continue;
274 }
275
276 if (cmsg->cmsg_level != SOL_IP)
277 continue;
278 switch (cmsg->cmsg_type) {
279 case IP_RETOPTS:
280 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
281
282 /* Our caller is responsible for freeing ipc->opt */
283 err = ip_options_get(net, &ipc->opt,
284 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
285 err < 40 ? err : 40);
286 if (err)
287 return err;
288 break;
289 case IP_PKTINFO:
290 {
291 struct in_pktinfo *info;
292 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
293 return -EINVAL;
294 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
295 if (info->ipi_ifindex)
296 ipc->oif = info->ipi_ifindex;
297 ipc->addr = info->ipi_spec_dst.s_addr;
298 break;
299 }
300 case IP_TTL:
301 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
302 return -EINVAL;
303 val = *(int *)CMSG_DATA(cmsg);
304 if (val < 1 || val > 255)
305 return -EINVAL;
306 ipc->ttl = val;
307 break;
308 case IP_TOS:
309 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
310 val = *(int *)CMSG_DATA(cmsg);
311 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
312 val = *(u8 *)CMSG_DATA(cmsg);
313 else
314 return -EINVAL;
315 if (val < 0 || val > 255)
316 return -EINVAL;
317 ipc->tos = val;
318 ipc->priority = rt_tos2priority(ipc->tos);
319 break;
320 case IP_PROTOCOL:
321 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
322 return -EINVAL;
323 val = *(int *)CMSG_DATA(cmsg);
324 if (val < 1 || val > 255)
325 return -EINVAL;
326 ipc->protocol = val;
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332 return 0;
333}
334
335static void ip_ra_destroy_rcu(struct rcu_head *head)
336{
337 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
338
339 sock_put(ra->saved_sk);
340 kfree(ra);
341}
342
343int ip_ra_control(struct sock *sk, unsigned char on,
344 void (*destructor)(struct sock *))
345{
346 struct ip_ra_chain *ra, *new_ra;
347 struct ip_ra_chain __rcu **rap;
348 struct net *net = sock_net(sk);
349
350 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
351 return -EINVAL;
352
353 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
354 if (on && !new_ra)
355 return -ENOMEM;
356
357 mutex_lock(&net->ipv4.ra_mutex);
358 for (rap = &net->ipv4.ra_chain;
359 (ra = rcu_dereference_protected(*rap,
360 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
361 rap = &ra->next) {
362 if (ra->sk == sk) {
363 if (on) {
364 mutex_unlock(&net->ipv4.ra_mutex);
365 kfree(new_ra);
366 return -EADDRINUSE;
367 }
368 /* don't let ip_call_ra_chain() use sk again */
369 ra->sk = NULL;
370 RCU_INIT_POINTER(*rap, ra->next);
371 mutex_unlock(&net->ipv4.ra_mutex);
372
373 if (ra->destructor)
374 ra->destructor(sk);
375 /*
376 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
377 * period. This guarantees that ip_call_ra_chain() doesn't need
378 * to mess with socket refcounts.
379 */
380 ra->saved_sk = sk;
381 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
382 return 0;
383 }
384 }
385 if (!new_ra) {
386 mutex_unlock(&net->ipv4.ra_mutex);
387 return -ENOBUFS;
388 }
389 new_ra->sk = sk;
390 new_ra->destructor = destructor;
391
392 RCU_INIT_POINTER(new_ra->next, ra);
393 rcu_assign_pointer(*rap, new_ra);
394 sock_hold(sk);
395 mutex_unlock(&net->ipv4.ra_mutex);
396
397 return 0;
398}
399
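/*
 * Editor's illustration (not part of this file): ip_ra_control() backs the
 * IP_ROUTER_ALERT socket option.  A minimal sketch of how an RSVP-style
 * daemon could ask to see Router Alert datagrams; opening the raw socket
 * requires CAP_NET_RAW.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *
 *	// rejected with EINVAL on anything but a raw, non-IPPROTO_RAW socket
 *	setsockopt(fd, SOL_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 */
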
400static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
401 struct sock_ee_data_rfc4884 *out)
402{
403 switch (icmp_hdr(skb)->type) {
404 case ICMP_DEST_UNREACH:
405 case ICMP_TIME_EXCEEDED:
406 case ICMP_PARAMETERPROB:
407 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
408 icmp_hdr(skb)->un.reserved[1] * 4);
409 }
410}
411
412void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
413 __be16 port, u32 info, u8 *payload)
414{
415 struct sock_exterr_skb *serr;
416
417 skb = skb_clone(skb, GFP_ATOMIC);
418 if (!skb)
419 return;
420
421 serr = SKB_EXT_ERR(skb);
422 serr->ee.ee_errno = err;
423 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
424 serr->ee.ee_type = icmp_hdr(skb)->type;
425 serr->ee.ee_code = icmp_hdr(skb)->code;
426 serr->ee.ee_pad = 0;
427 serr->ee.ee_info = info;
428 serr->ee.ee_data = 0;
429 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
430 skb_network_header(skb);
431 serr->port = port;
432
433 if (skb_pull(skb, payload - skb->data)) {
434 if (inet_test_bit(RECVERR_RFC4884, sk))
435 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
436
437 skb_reset_transport_header(skb);
438 if (sock_queue_err_skb(sk, skb) == 0)
439 return;
440 }
441 kfree_skb(skb);
442}
443EXPORT_SYMBOL_GPL(ip_icmp_error);
444
445void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
446{
447 struct sock_exterr_skb *serr;
448 struct iphdr *iph;
449 struct sk_buff *skb;
450
451 if (!inet_test_bit(RECVERR, sk))
452 return;
453
454 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
455 if (!skb)
456 return;
457
458 skb_put(skb, sizeof(struct iphdr));
459 skb_reset_network_header(skb);
460 iph = ip_hdr(skb);
461 iph->daddr = daddr;
462
463 serr = SKB_EXT_ERR(skb);
464 serr->ee.ee_errno = err;
465 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
466 serr->ee.ee_type = 0;
467 serr->ee.ee_code = 0;
468 serr->ee.ee_pad = 0;
469 serr->ee.ee_info = info;
470 serr->ee.ee_data = 0;
471 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
472 serr->port = port;
473
474 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
475 skb_reset_transport_header(skb);
476
477 if (sock_queue_err_skb(sk, skb))
478 kfree_skb(skb);
479}
480
481/* For some errors, addr_offset is valid even with a zero payload and
482 * zero port. addr_offset must also be honoured whenever port is set.
483 */
484static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
485{
486 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
487 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
488}
489
490/* IPv4 supports cmsg on all ICMP errors and some timestamps
491 *
492 * Timestamp code paths do not initialize the fields expected by cmsg:
493 * the PKTINFO fields in skb->cb[]. Fill those in here.
494 */
495static bool ipv4_datagram_support_cmsg(const struct sock *sk,
496 struct sk_buff *skb,
497 int ee_origin)
498{
499 struct in_pktinfo *info;
500
501 if (ee_origin == SO_EE_ORIGIN_ICMP)
502 return true;
503
504 if (ee_origin == SO_EE_ORIGIN_LOCAL)
505 return false;
506
507 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
508 * timestamp with egress dev. Not possible for packets without iif
509 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
510 */
511 info = PKTINFO_SKB_CB(skb);
512 if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
513 !info->ipi_ifindex)
514 return false;
515
516 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
517 return true;
518}
519
520/*
521 * Handle MSG_ERRQUEUE
522 */
523int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
524{
525 struct sock_exterr_skb *serr;
526 struct sk_buff *skb;
527 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
528 struct {
529 struct sock_extended_err ee;
530 struct sockaddr_in offender;
531 } errhdr;
532 int err;
533 int copied;
534
535 err = -EAGAIN;
536 skb = sock_dequeue_err_skb(sk);
537 if (!skb)
538 goto out;
539
540 copied = skb->len;
541 if (copied > len) {
542 msg->msg_flags |= MSG_TRUNC;
543 copied = len;
544 }
545 err = skb_copy_datagram_msg(skb, 0, msg, copied);
546 if (unlikely(err)) {
547 kfree_skb(skb);
548 return err;
549 }
550 sock_recv_timestamp(msg, sk, skb);
551
552 serr = SKB_EXT_ERR(skb);
553
554 if (sin && ipv4_datagram_support_addr(serr)) {
555 sin->sin_family = AF_INET;
556 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
557 serr->addr_offset);
558 sin->sin_port = serr->port;
559 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
560 *addr_len = sizeof(*sin);
561 }
562
563 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
564 sin = &errhdr.offender;
565 memset(sin, 0, sizeof(*sin));
566
567 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
568 sin->sin_family = AF_INET;
569 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
570 if (inet_cmsg_flags(inet_sk(sk)))
571 ip_cmsg_recv(msg, skb);
572 }
573
574 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
575
576 /* Now we could try to dump the offending packet's options */
577
578 msg->msg_flags |= MSG_ERRQUEUE;
579 err = copied;
580
581 consume_skb(skb);
582out:
583 return err;
584}
585
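/*
 * Editor's illustration (not part of this file): ip_recv_error() services
 * recvmsg(MSG_ERRQUEUE) once IP_RECVERR is enabled.  A hedged userspace
 * sketch of draining one queued error; <linux/errqueue.h> provides
 * struct sock_extended_err.
 *
 *	#include <linux/errqueue.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	// ... send something that triggers an ICMP error ...
 *
 *	char cbuf[512];
 *	struct sockaddr_in addr;	// original datagram's destination, when available
 *	struct msghdr msg = {
 *		.msg_name = &addr, .msg_namelen = sizeof(addr),
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		struct cmsghdr *c;
 *
 *		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *			if (c->cmsg_level == SOL_IP && c->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err ee;
 *
 *				memcpy(&ee, CMSG_DATA(c), sizeof(ee));
 *				// ee.ee_errno, ee.ee_type, ee.ee_code describe the error
 *			}
 *		}
 *	}
 */
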
586void __ip_sock_set_tos(struct sock *sk, int val)
587{
588 u8 old_tos = inet_sk(sk)->tos;
589
590 if (sk->sk_type == SOCK_STREAM) {
591 val &= ~INET_ECN_MASK;
592 val |= old_tos & INET_ECN_MASK;
593 }
594 if (old_tos != val) {
595 WRITE_ONCE(inet_sk(sk)->tos, val);
596 WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
597 sk_dst_reset(sk);
598 }
599}
600
601void ip_sock_set_tos(struct sock *sk, int val)
602{
603 sockopt_lock_sock(sk);
604 __ip_sock_set_tos(sk, val);
605 sockopt_release_sock(sk);
606}
607EXPORT_SYMBOL(ip_sock_set_tos);
608
609void ip_sock_set_freebind(struct sock *sk)
610{
611 inet_set_bit(FREEBIND, sk);
612}
613EXPORT_SYMBOL(ip_sock_set_freebind);
614
615void ip_sock_set_recverr(struct sock *sk)
616{
617 inet_set_bit(RECVERR, sk);
618}
619EXPORT_SYMBOL(ip_sock_set_recverr);
620
621int ip_sock_set_mtu_discover(struct sock *sk, int val)
622{
623 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
624 return -EINVAL;
625 WRITE_ONCE(inet_sk(sk)->pmtudisc, val);
626 return 0;
627}
628EXPORT_SYMBOL(ip_sock_set_mtu_discover);
629
630void ip_sock_set_pktinfo(struct sock *sk)
631{
632 inet_set_bit(PKTINFO, sk);
633}
634EXPORT_SYMBOL(ip_sock_set_pktinfo);
635
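/*
 * Editor's note: the ip_sock_set_*() helpers above exist so in-kernel socket
 * users can flip common SOL_IP options without going through setsockopt().
 * A hedged sketch of a hypothetical kernel-side caller; the function name and
 * error handling are illustrative, not taken from an existing user.
 *
 *	static int example_setup_kernel_sock(struct net *net, struct socket **res)
 *	{
 *		struct socket *sock;
 *		int err;
 *
 *		err = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 *		if (err)
 *			return err;
 *
 *		ip_sock_set_pktinfo(sock->sk);		// IP_PKTINFO cmsgs
 *		ip_sock_set_recverr(sock->sk);		// queue ICMP errors
 *		ip_sock_set_tos(sock->sk, IPTOS_LOWDELAY);
 *		err = ip_sock_set_mtu_discover(sock->sk, IP_PMTUDISC_DO);
 *		if (err) {
 *			sock_release(sock);
 *			return err;
 *		}
 *
 *		*res = sock;
 *		return 0;
 *	}
 */
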
636/*
637 * Socket option code for IP. This is the end of the line after any
638 * TCP, UDP, etc. options on an IP socket.
639 */
640static bool setsockopt_needs_rtnl(int optname)
641{
642 switch (optname) {
643 case IP_ADD_MEMBERSHIP:
644 case IP_ADD_SOURCE_MEMBERSHIP:
645 case IP_BLOCK_SOURCE:
646 case IP_DROP_MEMBERSHIP:
647 case IP_DROP_SOURCE_MEMBERSHIP:
648 case IP_MSFILTER:
649 case IP_UNBLOCK_SOURCE:
650 case MCAST_BLOCK_SOURCE:
651 case MCAST_MSFILTER:
652 case MCAST_JOIN_GROUP:
653 case MCAST_JOIN_SOURCE_GROUP:
654 case MCAST_LEAVE_GROUP:
655 case MCAST_LEAVE_SOURCE_GROUP:
656 case MCAST_UNBLOCK_SOURCE:
657 return true;
658 }
659 return false;
660}
661
662static int set_mcast_msfilter(struct sock *sk, int ifindex,
663 int numsrc, int fmode,
664 struct sockaddr_storage *group,
665 struct sockaddr_storage *list)
666{
667 struct ip_msfilter *msf;
668 struct sockaddr_in *psin;
669 int err, i;
670
671 msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
672 if (!msf)
673 return -ENOBUFS;
674
675 psin = (struct sockaddr_in *)group;
676 if (psin->sin_family != AF_INET)
677 goto Eaddrnotavail;
678 msf->imsf_multiaddr = psin->sin_addr.s_addr;
679 msf->imsf_interface = 0;
680 msf->imsf_fmode = fmode;
681 msf->imsf_numsrc = numsrc;
682 for (i = 0; i < numsrc; ++i) {
683 psin = (struct sockaddr_in *)&list[i];
684
685 if (psin->sin_family != AF_INET)
686 goto Eaddrnotavail;
687 msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
688 }
689 err = ip_mc_msfilter(sk, msf, ifindex);
690 kfree(msf);
691 return err;
692
693Eaddrnotavail:
694 kfree(msf);
695 return -EADDRNOTAVAIL;
696}
697
698static int copy_group_source_from_sockptr(struct group_source_req *greqs,
699 sockptr_t optval, int optlen)
700{
701 if (in_compat_syscall()) {
702 struct compat_group_source_req gr32;
703
704 if (optlen != sizeof(gr32))
705 return -EINVAL;
706 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
707 return -EFAULT;
708 greqs->gsr_interface = gr32.gsr_interface;
709 greqs->gsr_group = gr32.gsr_group;
710 greqs->gsr_source = gr32.gsr_source;
711 } else {
712 if (optlen != sizeof(*greqs))
713 return -EINVAL;
714 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
715 return -EFAULT;
716 }
717
718 return 0;
719}
720
721static int do_mcast_group_source(struct sock *sk, int optname,
722 sockptr_t optval, int optlen)
723{
724 struct group_source_req greqs;
725 struct ip_mreq_source mreqs;
726 struct sockaddr_in *psin;
727 int omode, add, err;
728
729 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
730 if (err)
731 return err;
732
733 if (greqs.gsr_group.ss_family != AF_INET ||
734 greqs.gsr_source.ss_family != AF_INET)
735 return -EADDRNOTAVAIL;
736
737 psin = (struct sockaddr_in *)&greqs.gsr_group;
738 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
739 psin = (struct sockaddr_in *)&greqs.gsr_source;
740 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
741 mreqs.imr_interface = 0; /* use index for mc_source */
742
743 if (optname == MCAST_BLOCK_SOURCE) {
744 omode = MCAST_EXCLUDE;
745 add = 1;
746 } else if (optname == MCAST_UNBLOCK_SOURCE) {
747 omode = MCAST_EXCLUDE;
748 add = 0;
749 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
750 struct ip_mreqn mreq;
751
752 psin = (struct sockaddr_in *)&greqs.gsr_group;
753 mreq.imr_multiaddr = psin->sin_addr;
754 mreq.imr_address.s_addr = 0;
755 mreq.imr_ifindex = greqs.gsr_interface;
756 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
757 if (err && err != -EADDRINUSE)
758 return err;
759 greqs.gsr_interface = mreq.imr_ifindex;
760 omode = MCAST_INCLUDE;
761 add = 1;
762 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
763 omode = MCAST_INCLUDE;
764 add = 0;
765 }
766 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
767}
768
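/*
 * Editor's illustration (not part of this file): do_mcast_group_source()
 * implements the protocol-independent source-specific multicast options.
 * A hedged userspace sketch of a source-specific join; "eth0" and the
 * addresses are placeholders.
 *
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct group_source_req gsr;
 *	struct sockaddr_in *grp = (struct sockaddr_in *)&gsr.gsr_group;
 *	struct sockaddr_in *src = (struct sockaddr_in *)&gsr.gsr_source;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = if_nametoindex("eth0");
 *	grp->sin_family = AF_INET;
 *	grp->sin_addr.s_addr = inet_addr("232.1.1.1");
 *	src->sin_family = AF_INET;
 *	src->sin_addr.s_addr = inet_addr("192.0.2.10");
 *
 *	setsockopt(fd, SOL_IP, MCAST_JOIN_SOURCE_GROUP, &gsr, sizeof(gsr));
 */
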
769static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
770{
771 struct group_filter *gsf = NULL;
772 int err;
773
774 if (optlen < GROUP_FILTER_SIZE(0))
775 return -EINVAL;
776 if (optlen > READ_ONCE(sock_net(sk)->core.sysctl_optmem_max))
777 return -ENOBUFS;
778
779 gsf = memdup_sockptr(optval, optlen);
780 if (IS_ERR(gsf))
781 return PTR_ERR(gsf);
782
783 /* numsrc >= (4G-140)/128 overflow in 32 bits */
784 err = -ENOBUFS;
785 if (gsf->gf_numsrc >= 0x1ffffff ||
786 gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
787 goto out_free_gsf;
788
789 err = -EINVAL;
790 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
791 goto out_free_gsf;
792
793 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
794 gsf->gf_fmode, &gsf->gf_group,
795 gsf->gf_slist_flex);
796out_free_gsf:
797 kfree(gsf);
798 return err;
799}
800
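/*
 * Editor's illustration (not part of this file): a hedged userspace sketch of
 * the full-state MCAST_MSFILTER interface handled above.  The userspace
 * struct group_filter carries room for one source (gf_slist[1]; the kernel
 * side names it gf_slist_flex); "eth0" and the addresses are placeholders.
 *
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct group_filter gf;
 *	struct sockaddr_in *sa;
 *
 *	memset(&gf, 0, sizeof(gf));
 *	gf.gf_interface = if_nametoindex("eth0");
 *	gf.gf_fmode = MCAST_INCLUDE;
 *	gf.gf_numsrc = 1;
 *	sa = (struct sockaddr_in *)&gf.gf_group;
 *	sa->sin_family = AF_INET;
 *	sa->sin_addr.s_addr = inet_addr("232.1.1.1");
 *	sa = (struct sockaddr_in *)&gf.gf_slist[0];
 *	sa->sin_family = AF_INET;
 *	sa->sin_addr.s_addr = inet_addr("192.0.2.10");
 *
 *	setsockopt(fd, SOL_IP, MCAST_MSFILTER, &gf, sizeof(gf));
 */
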
801static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
802 int optlen)
803{
804 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
805 struct compat_group_filter *gf32;
806 unsigned int n;
807 void *p;
808 int err;
809
810 if (optlen < size0)
811 return -EINVAL;
812 if (optlen > READ_ONCE(sock_net(sk)->core.sysctl_optmem_max) - 4)
813 return -ENOBUFS;
814
815 p = kmalloc(optlen + 4, GFP_KERNEL);
816 if (!p)
817 return -ENOMEM;
818 gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */
819
820 err = -EFAULT;
821 if (copy_from_sockptr(gf32, optval, optlen))
822 goto out_free_gsf;
823
824 /* numsrc >= (4G-140)/128 overflow in 32 bits */
825 n = gf32->gf_numsrc;
826 err = -ENOBUFS;
827 if (n >= 0x1ffffff)
828 goto out_free_gsf;
829
830 err = -EINVAL;
831 if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
832 goto out_free_gsf;
833
834 /* numsrc >= (4G-140)/128 overflow in 32 bits */
835 err = -ENOBUFS;
836 if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
837 goto out_free_gsf;
838 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
839 &gf32->gf_group, gf32->gf_slist_flex);
840out_free_gsf:
841 kfree(p);
842 return err;
843}
844
845static int ip_mcast_join_leave(struct sock *sk, int optname,
846 sockptr_t optval, int optlen)
847{
848 struct ip_mreqn mreq = { };
849 struct sockaddr_in *psin;
850 struct group_req greq;
851
852 if (optlen < sizeof(struct group_req))
853 return -EINVAL;
854 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
855 return -EFAULT;
856
857 psin = (struct sockaddr_in *)&greq.gr_group;
858 if (psin->sin_family != AF_INET)
859 return -EINVAL;
860 mreq.imr_multiaddr = psin->sin_addr;
861 mreq.imr_ifindex = greq.gr_interface;
862 if (optname == MCAST_JOIN_GROUP)
863 return ip_mc_join_group(sk, &mreq);
864 return ip_mc_leave_group(sk, &mreq);
865}
866
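/*
 * Editor's illustration (not part of this file): ip_mcast_join_leave() backs
 * the protocol-independent MCAST_JOIN_GROUP/MCAST_LEAVE_GROUP options.  A
 * hedged userspace sketch of an any-source join; "eth0" and the group
 * address are placeholders.
 *
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct group_req gr;
 *	struct sockaddr_in *sa = (struct sockaddr_in *)&gr.gr_group;
 *
 *	memset(&gr, 0, sizeof(gr));
 *	gr.gr_interface = if_nametoindex("eth0");
 *	sa->sin_family = AF_INET;
 *	sa->sin_addr.s_addr = inet_addr("239.1.2.3");
 *
 *	setsockopt(fd, SOL_IP, MCAST_JOIN_GROUP, &gr, sizeof(gr));
 */
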
867static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
868 sockptr_t optval, int optlen)
869{
870 struct compat_group_req greq;
871 struct ip_mreqn mreq = { };
872 struct sockaddr_in *psin;
873
874 if (optlen < sizeof(struct compat_group_req))
875 return -EINVAL;
876 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
877 return -EFAULT;
878
879 psin = (struct sockaddr_in *)&greq.gr_group;
880 if (psin->sin_family != AF_INET)
881 return -EINVAL;
882 mreq.imr_multiaddr = psin->sin_addr;
883 mreq.imr_ifindex = greq.gr_interface;
884
885 if (optname == MCAST_JOIN_GROUP)
886 return ip_mc_join_group(sk, &mreq);
887 return ip_mc_leave_group(sk, &mreq);
888}
889
890DEFINE_STATIC_KEY_FALSE(ip4_min_ttl);
891
892int do_ip_setsockopt(struct sock *sk, int level, int optname,
893 sockptr_t optval, unsigned int optlen)
894{
895 struct inet_sock *inet = inet_sk(sk);
896 struct net *net = sock_net(sk);
897 int val = 0, err;
898 bool needs_rtnl = setsockopt_needs_rtnl(optname);
899
900 switch (optname) {
901 case IP_PKTINFO:
902 case IP_RECVTTL:
903 case IP_RECVOPTS:
904 case IP_RECVTOS:
905 case IP_RETOPTS:
906 case IP_TOS:
907 case IP_TTL:
908 case IP_HDRINCL:
909 case IP_MTU_DISCOVER:
910 case IP_RECVERR:
911 case IP_ROUTER_ALERT:
912 case IP_FREEBIND:
913 case IP_PASSSEC:
914 case IP_TRANSPARENT:
915 case IP_MINTTL:
916 case IP_NODEFRAG:
917 case IP_BIND_ADDRESS_NO_PORT:
918 case IP_UNICAST_IF:
919 case IP_MULTICAST_TTL:
920 case IP_MULTICAST_ALL:
921 case IP_MULTICAST_LOOP:
922 case IP_RECVORIGDSTADDR:
923 case IP_CHECKSUM:
924 case IP_RECVFRAGSIZE:
925 case IP_RECVERR_RFC4884:
926 case IP_LOCAL_PORT_RANGE:
927 if (optlen >= sizeof(int)) {
928 if (copy_from_sockptr(&val, optval, sizeof(val)))
929 return -EFAULT;
930 } else if (optlen >= sizeof(char)) {
931 unsigned char ucval;
932
933 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
934 return -EFAULT;
935 val = (int) ucval;
936 }
937 }
938
939 /* If optlen==0, it is equivalent to val == 0 */
940
941 if (optname == IP_ROUTER_ALERT)
942 return ip_ra_control(sk, val ? 1 : 0, NULL);
943 if (ip_mroute_opt(optname))
944 return ip_mroute_setsockopt(sk, optname, optval, optlen);
945
946 /* Handle options that can be set without locking the socket. */
947 switch (optname) {
948 case IP_PKTINFO:
949 inet_assign_bit(PKTINFO, sk, val);
950 return 0;
951 case IP_RECVTTL:
952 inet_assign_bit(TTL, sk, val);
953 return 0;
954 case IP_RECVTOS:
955 inet_assign_bit(TOS, sk, val);
956 return 0;
957 case IP_RECVOPTS:
958 inet_assign_bit(RECVOPTS, sk, val);
959 return 0;
960 case IP_RETOPTS:
961 inet_assign_bit(RETOPTS, sk, val);
962 return 0;
963 case IP_PASSSEC:
964 inet_assign_bit(PASSSEC, sk, val);
965 return 0;
966 case IP_RECVORIGDSTADDR:
967 inet_assign_bit(ORIGDSTADDR, sk, val);
968 return 0;
969 case IP_RECVFRAGSIZE:
970 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
971 return -EINVAL;
972 inet_assign_bit(RECVFRAGSIZE, sk, val);
973 return 0;
974 case IP_RECVERR:
975 inet_assign_bit(RECVERR, sk, val);
976 if (!val)
977 skb_errqueue_purge(&sk->sk_error_queue);
978 return 0;
979 case IP_RECVERR_RFC4884:
980 if (val < 0 || val > 1)
981 return -EINVAL;
982 inet_assign_bit(RECVERR_RFC4884, sk, val);
983 return 0;
984 case IP_FREEBIND:
985 if (optlen < 1)
986 return -EINVAL;
987 inet_assign_bit(FREEBIND, sk, val);
988 return 0;
989 case IP_HDRINCL:
990 if (sk->sk_type != SOCK_RAW)
991 return -ENOPROTOOPT;
992 inet_assign_bit(HDRINCL, sk, val);
993 return 0;
994 case IP_MULTICAST_LOOP:
995 if (optlen < 1)
996 return -EINVAL;
997 inet_assign_bit(MC_LOOP, sk, val);
998 return 0;
999 case IP_MULTICAST_ALL:
1000 if (optlen < 1)
1001 return -EINVAL;
1002 if (val != 0 && val != 1)
1003 return -EINVAL;
1004 inet_assign_bit(MC_ALL, sk, val);
1005 return 0;
1006 case IP_TRANSPARENT:
1007 if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1008 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1009 return -EPERM;
1010 if (optlen < 1)
1011 return -EINVAL;
1012 inet_assign_bit(TRANSPARENT, sk, val);
1013 return 0;
1014 case IP_NODEFRAG:
1015 if (sk->sk_type != SOCK_RAW)
1016 return -ENOPROTOOPT;
1017 inet_assign_bit(NODEFRAG, sk, val);
1018 return 0;
1019 case IP_BIND_ADDRESS_NO_PORT:
1020 inet_assign_bit(BIND_ADDRESS_NO_PORT, sk, val);
1021 return 0;
1022 case IP_TTL:
1023 if (optlen < 1)
1024 return -EINVAL;
1025 if (val != -1 && (val < 1 || val > 255))
1026 return -EINVAL;
1027 WRITE_ONCE(inet->uc_ttl, val);
1028 return 0;
1029 case IP_MINTTL:
1030 if (optlen < 1)
1031 return -EINVAL;
1032 if (val < 0 || val > 255)
1033 return -EINVAL;
1034
1035 if (val)
1036 static_branch_enable(&ip4_min_ttl);
1037
1038 WRITE_ONCE(inet->min_ttl, val);
1039 return 0;
1040 case IP_MULTICAST_TTL:
1041 if (sk->sk_type == SOCK_STREAM)
1042 return -EINVAL;
1043 if (optlen < 1)
1044 return -EINVAL;
1045 if (val == -1)
1046 val = 1;
1047 if (val < 0 || val > 255)
1048 return -EINVAL;
1049 WRITE_ONCE(inet->mc_ttl, val);
1050 return 0;
1051 case IP_MTU_DISCOVER:
1052 return ip_sock_set_mtu_discover(sk, val);
1053 case IP_TOS: /* This sets both TOS and Precedence */
1054 ip_sock_set_tos(sk, val);
1055 return 0;
1056 case IP_LOCAL_PORT_RANGE:
1057 {
1058 u16 lo = val;
1059 u16 hi = val >> 16;
1060
1061 if (optlen != sizeof(u32))
1062 return -EINVAL;
1063 if (lo != 0 && hi != 0 && lo > hi)
1064 return -EINVAL;
1065
1066 WRITE_ONCE(inet->local_port_range, val);
1067 return 0;
1068 }
1069 }
1070
1071 err = 0;
1072 if (needs_rtnl)
1073 rtnl_lock();
1074 sockopt_lock_sock(sk);
1075
1076 switch (optname) {
1077 case IP_OPTIONS:
1078 {
1079 struct ip_options_rcu *old, *opt = NULL;
1080
1081 if (optlen > 40)
1082 goto e_inval;
1083 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
1084 if (err)
1085 break;
1086 old = rcu_dereference_protected(inet->inet_opt,
1087 lockdep_sock_is_held(sk));
1088 if (inet_test_bit(IS_ICSK, sk)) {
1089 struct inet_connection_sock *icsk = inet_csk(sk);
1090#if IS_ENABLED(CONFIG_IPV6)
1091 if (sk->sk_family == PF_INET ||
1092 (!((1 << sk->sk_state) &
1093 (TCPF_LISTEN | TCPF_CLOSE)) &&
1094 inet->inet_daddr != LOOPBACK4_IPV6)) {
1095#endif
1096 if (old)
1097 icsk->icsk_ext_hdr_len -= old->opt.optlen;
1098 if (opt)
1099 icsk->icsk_ext_hdr_len += opt->opt.optlen;
1100 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
1101#if IS_ENABLED(CONFIG_IPV6)
1102 }
1103#endif
1104 }
1105 rcu_assign_pointer(inet->inet_opt, opt);
1106 if (old)
1107 kfree_rcu(old, rcu);
1108 break;
1109 }
1110 case IP_CHECKSUM:
1111 if (val) {
1112 if (!(inet_test_bit(CHECKSUM, sk))) {
1113 inet_inc_convert_csum(sk);
1114 inet_set_bit(CHECKSUM, sk);
1115 }
1116 } else {
1117 if (inet_test_bit(CHECKSUM, sk)) {
1118 inet_dec_convert_csum(sk);
1119 inet_clear_bit(CHECKSUM, sk);
1120 }
1121 }
1122 break;
1123 case IP_UNICAST_IF:
1124 {
1125 struct net_device *dev = NULL;
1126 int ifindex;
1127 int midx;
1128
1129 if (optlen != sizeof(int))
1130 goto e_inval;
1131
1132 ifindex = (__force int)ntohl((__force __be32)val);
1133 if (ifindex == 0) {
1134 WRITE_ONCE(inet->uc_index, 0);
1135 err = 0;
1136 break;
1137 }
1138
1139 dev = dev_get_by_index(sock_net(sk), ifindex);
1140 err = -EADDRNOTAVAIL;
1141 if (!dev)
1142 break;
1143
1144 midx = l3mdev_master_ifindex(dev);
1145 dev_put(dev);
1146
1147 err = -EINVAL;
1148 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1149 break;
1150
1151 WRITE_ONCE(inet->uc_index, ifindex);
1152 err = 0;
1153 break;
1154 }
1155 case IP_MULTICAST_IF:
1156 {
1157 struct ip_mreqn mreq;
1158 struct net_device *dev = NULL;
1159 int midx;
1160
1161 if (sk->sk_type == SOCK_STREAM)
1162 goto e_inval;
1163 /*
1164 * Check that the arguments are valid
1165 */
1166
1167 if (optlen < sizeof(struct in_addr))
1168 goto e_inval;
1169
1170 err = -EFAULT;
1171 if (optlen >= sizeof(struct ip_mreqn)) {
1172 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1173 break;
1174 } else {
1175 memset(&mreq, 0, sizeof(mreq));
1176 if (optlen >= sizeof(struct ip_mreq)) {
1177 if (copy_from_sockptr(&mreq, optval,
1178 sizeof(struct ip_mreq)))
1179 break;
1180 } else if (optlen >= sizeof(struct in_addr)) {
1181 if (copy_from_sockptr(&mreq.imr_address, optval,
1182 sizeof(struct in_addr)))
1183 break;
1184 }
1185 }
1186
1187 if (!mreq.imr_ifindex) {
1188 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1189 WRITE_ONCE(inet->mc_index, 0);
1190 WRITE_ONCE(inet->mc_addr, 0);
1191 err = 0;
1192 break;
1193 }
1194 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1195 if (dev)
1196 mreq.imr_ifindex = dev->ifindex;
1197 } else
1198 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1199
1200
1201 err = -EADDRNOTAVAIL;
1202 if (!dev)
1203 break;
1204
1205 midx = l3mdev_master_ifindex(dev);
1206
1207 dev_put(dev);
1208
1209 err = -EINVAL;
1210 if (sk->sk_bound_dev_if &&
1211 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1212 midx != sk->sk_bound_dev_if)
1213 break;
1214
1215 WRITE_ONCE(inet->mc_index, mreq.imr_ifindex);
1216 WRITE_ONCE(inet->mc_addr, mreq.imr_address.s_addr);
1217 err = 0;
1218 break;
1219 }
1220
1221 case IP_ADD_MEMBERSHIP:
1222 case IP_DROP_MEMBERSHIP:
1223 {
1224 struct ip_mreqn mreq;
1225
1226 err = -EPROTO;
1227 if (inet_test_bit(IS_ICSK, sk))
1228 break;
1229
1230 if (optlen < sizeof(struct ip_mreq))
1231 goto e_inval;
1232 err = -EFAULT;
1233 if (optlen >= sizeof(struct ip_mreqn)) {
1234 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1235 break;
1236 } else {
1237 memset(&mreq, 0, sizeof(mreq));
1238 if (copy_from_sockptr(&mreq, optval,
1239 sizeof(struct ip_mreq)))
1240 break;
1241 }
1242
1243 if (optname == IP_ADD_MEMBERSHIP)
1244 err = ip_mc_join_group(sk, &mreq);
1245 else
1246 err = ip_mc_leave_group(sk, &mreq);
1247 break;
1248 }
1249 case IP_MSFILTER:
1250 {
1251 struct ip_msfilter *msf;
1252
1253 if (optlen < IP_MSFILTER_SIZE(0))
1254 goto e_inval;
1255 if (optlen > READ_ONCE(net->core.sysctl_optmem_max)) {
1256 err = -ENOBUFS;
1257 break;
1258 }
1259 msf = memdup_sockptr(optval, optlen);
1260 if (IS_ERR(msf)) {
1261 err = PTR_ERR(msf);
1262 break;
1263 }
1264 /* numsrc >= (1G-4) overflow in 32 bits */
1265 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1266 msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
1267 kfree(msf);
1268 err = -ENOBUFS;
1269 break;
1270 }
1271 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1272 kfree(msf);
1273 err = -EINVAL;
1274 break;
1275 }
1276 err = ip_mc_msfilter(sk, msf, 0);
1277 kfree(msf);
1278 break;
1279 }
1280 case IP_BLOCK_SOURCE:
1281 case IP_UNBLOCK_SOURCE:
1282 case IP_ADD_SOURCE_MEMBERSHIP:
1283 case IP_DROP_SOURCE_MEMBERSHIP:
1284 {
1285 struct ip_mreq_source mreqs;
1286 int omode, add;
1287
1288 if (optlen != sizeof(struct ip_mreq_source))
1289 goto e_inval;
1290 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1291 err = -EFAULT;
1292 break;
1293 }
1294 if (optname == IP_BLOCK_SOURCE) {
1295 omode = MCAST_EXCLUDE;
1296 add = 1;
1297 } else if (optname == IP_UNBLOCK_SOURCE) {
1298 omode = MCAST_EXCLUDE;
1299 add = 0;
1300 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1301 struct ip_mreqn mreq;
1302
1303 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1304 mreq.imr_address.s_addr = mreqs.imr_interface;
1305 mreq.imr_ifindex = 0;
1306 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1307 if (err && err != -EADDRINUSE)
1308 break;
1309 omode = MCAST_INCLUDE;
1310 add = 1;
1311 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1312 omode = MCAST_INCLUDE;
1313 add = 0;
1314 }
1315 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1316 break;
1317 }
1318 case MCAST_JOIN_GROUP:
1319 case MCAST_LEAVE_GROUP:
1320 if (in_compat_syscall())
1321 err = compat_ip_mcast_join_leave(sk, optname, optval,
1322 optlen);
1323 else
1324 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1325 break;
1326 case MCAST_JOIN_SOURCE_GROUP:
1327 case MCAST_LEAVE_SOURCE_GROUP:
1328 case MCAST_BLOCK_SOURCE:
1329 case MCAST_UNBLOCK_SOURCE:
1330 err = do_mcast_group_source(sk, optname, optval, optlen);
1331 break;
1332 case MCAST_MSFILTER:
1333 if (in_compat_syscall())
1334 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1335 else
1336 err = ip_set_mcast_msfilter(sk, optval, optlen);
1337 break;
1338 case IP_IPSEC_POLICY:
1339 case IP_XFRM_POLICY:
1340 err = -EPERM;
1341 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1342 break;
1343 err = xfrm_user_policy(sk, optname, optval, optlen);
1344 break;
1345
1346 default:
1347 err = -ENOPROTOOPT;
1348 break;
1349 }
1350 sockopt_release_sock(sk);
1351 if (needs_rtnl)
1352 rtnl_unlock();
1353 return err;
1354
1355e_inval:
1356 sockopt_release_sock(sk);
1357 if (needs_rtnl)
1358 rtnl_unlock();
1359 return -EINVAL;
1360}
1361
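/*
 * Editor's illustration (not part of this file): the IP_LOCAL_PORT_RANGE case
 * above packs the low/high ports into one u32.  A hedged userspace sketch of
 * that encoding; the port numbers are arbitrary and IP_LOCAL_PORT_RANGE may
 * require recent uapi headers.
 *
 *	#include <netinet/in.h>
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	uint16_t lo = 40000, hi = 49999;
 *	uint32_t range = lo | ((uint32_t)hi << 16);	// low 16 bits = lo, high 16 bits = hi
 *
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 *
 *	// writing 0 removes the per-socket restriction again
 *	range = 0;
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 */
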
1362/**
1363 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1364 * @sk: socket
1365 * @skb: buffer
1366 * @drop_dst: if true, drops skb dst
1367 *
1368 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1369 * destination in skb->cb[] before the dst is dropped.
1370 * This way, the receiver doesn't take cache-line misses to read the rtable.
1371 */
1372void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
1373{
1374 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1375 bool prepare = inet_test_bit(PKTINFO, sk) ||
1376 ipv6_sk_rxinfo(sk);
1377
1378 if (prepare && skb_rtable(skb)) {
1379 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1380 * which has interface index (iif) as the first member of the
1381 * underlying inet{6}_skb_parm struct. This code then overlays
1382 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1383 * element so the iif is picked up from the prior IPCB. If iif
1384 * is the loopback interface, then return the sending interface
1385 * (e.g., process binds socket to eth0 for Tx which is
1386 * redirected to loopback in the rtable/dst).
1387 */
1388 struct rtable *rt = skb_rtable(skb);
1389 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1390
1391 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1392 pktinfo->ipi_ifindex = inet_iif(skb);
1393 else if (l3slave && rt && rt->rt_iif)
1394 pktinfo->ipi_ifindex = rt->rt_iif;
1395
1396 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1397 } else {
1398 pktinfo->ipi_ifindex = 0;
1399 pktinfo->ipi_spec_dst.s_addr = 0;
1400 }
1401 if (drop_dst)
1402 skb_dst_drop(skb);
1403}
1404
1405int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1406 unsigned int optlen)
1407{
1408 int err;
1409
1410 if (level != SOL_IP)
1411 return -ENOPROTOOPT;
1412
1413 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1414#ifdef CONFIG_NETFILTER
1415 /* we need to exclude all possible ENOPROTOOPTs except the default case */
1416 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1417 optname != IP_IPSEC_POLICY &&
1418 optname != IP_XFRM_POLICY &&
1419 !ip_mroute_opt(optname))
1420 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1421#endif
1422 return err;
1423}
1424EXPORT_SYMBOL(ip_setsockopt);
1425
1426/*
1427 * Get the options. Note for future reference: the GET of IP options returns
1428 * the _received_ ones, while the SET configures the _sent_ ones.
1429 */
1430
1431static bool getsockopt_needs_rtnl(int optname)
1432{
1433 switch (optname) {
1434 case IP_MSFILTER:
1435 case MCAST_MSFILTER:
1436 return true;
1437 }
1438 return false;
1439}
1440
1441static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1442 sockptr_t optlen, int len)
1443{
1444 const int size0 = offsetof(struct group_filter, gf_slist_flex);
1445 struct group_filter gsf;
1446 int num, gsf_size;
1447 int err;
1448
1449 if (len < size0)
1450 return -EINVAL;
1451 if (copy_from_sockptr(&gsf, optval, size0))
1452 return -EFAULT;
1453
1454 num = gsf.gf_numsrc;
1455 err = ip_mc_gsfget(sk, &gsf, optval,
1456 offsetof(struct group_filter, gf_slist_flex));
1457 if (err)
1458 return err;
1459 if (gsf.gf_numsrc < num)
1460 num = gsf.gf_numsrc;
1461 gsf_size = GROUP_FILTER_SIZE(num);
1462 if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) ||
1463 copy_to_sockptr(optval, &gsf, size0))
1464 return -EFAULT;
1465 return 0;
1466}
1467
1468static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1469 sockptr_t optlen, int len)
1470{
1471 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
1472 struct compat_group_filter gf32;
1473 struct group_filter gf;
1474 int num;
1475 int err;
1476
1477 if (len < size0)
1478 return -EINVAL;
1479 if (copy_from_sockptr(&gf32, optval, size0))
1480 return -EFAULT;
1481
1482 gf.gf_interface = gf32.gf_interface;
1483 gf.gf_fmode = gf32.gf_fmode;
1484 num = gf.gf_numsrc = gf32.gf_numsrc;
1485 gf.gf_group = gf32.gf_group;
1486
1487 err = ip_mc_gsfget(sk, &gf, optval,
1488 offsetof(struct compat_group_filter, gf_slist_flex));
1489 if (err)
1490 return err;
1491 if (gf.gf_numsrc < num)
1492 num = gf.gf_numsrc;
1493 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1494 if (copy_to_sockptr(optlen, &len, sizeof(int)) ||
1495 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode),
1496 &gf.gf_fmode, sizeof(gf.gf_fmode)) ||
1497 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc),
1498 &gf.gf_numsrc, sizeof(gf.gf_numsrc)))
1499 return -EFAULT;
1500 return 0;
1501}
1502
1503int do_ip_getsockopt(struct sock *sk, int level, int optname,
1504 sockptr_t optval, sockptr_t optlen)
1505{
1506 struct inet_sock *inet = inet_sk(sk);
1507 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1508 int val, err = 0;
1509 int len;
1510
1511 if (level != SOL_IP)
1512 return -EOPNOTSUPP;
1513
1514 if (ip_mroute_opt(optname))
1515 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1516
1517 if (copy_from_sockptr(&len, optlen, sizeof(int)))
1518 return -EFAULT;
1519 if (len < 0)
1520 return -EINVAL;
1521
1522 /* Handle options that can be read without locking the socket. */
1523 switch (optname) {
1524 case IP_PKTINFO:
1525 val = inet_test_bit(PKTINFO, sk);
1526 goto copyval;
1527 case IP_RECVTTL:
1528 val = inet_test_bit(TTL, sk);
1529 goto copyval;
1530 case IP_RECVTOS:
1531 val = inet_test_bit(TOS, sk);
1532 goto copyval;
1533 case IP_RECVOPTS:
1534 val = inet_test_bit(RECVOPTS, sk);
1535 goto copyval;
1536 case IP_RETOPTS:
1537 val = inet_test_bit(RETOPTS, sk);
1538 goto copyval;
1539 case IP_PASSSEC:
1540 val = inet_test_bit(PASSSEC, sk);
1541 goto copyval;
1542 case IP_RECVORIGDSTADDR:
1543 val = inet_test_bit(ORIGDSTADDR, sk);
1544 goto copyval;
1545 case IP_CHECKSUM:
1546 val = inet_test_bit(CHECKSUM, sk);
1547 goto copyval;
1548 case IP_RECVFRAGSIZE:
1549 val = inet_test_bit(RECVFRAGSIZE, sk);
1550 goto copyval;
1551 case IP_RECVERR:
1552 val = inet_test_bit(RECVERR, sk);
1553 goto copyval;
1554 case IP_RECVERR_RFC4884:
1555 val = inet_test_bit(RECVERR_RFC4884, sk);
1556 goto copyval;
1557 case IP_FREEBIND:
1558 val = inet_test_bit(FREEBIND, sk);
1559 goto copyval;
1560 case IP_HDRINCL:
1561 val = inet_test_bit(HDRINCL, sk);
1562 goto copyval;
1563 case IP_MULTICAST_LOOP:
1564 val = inet_test_bit(MC_LOOP, sk);
1565 goto copyval;
1566 case IP_MULTICAST_ALL:
1567 val = inet_test_bit(MC_ALL, sk);
1568 goto copyval;
1569 case IP_TRANSPARENT:
1570 val = inet_test_bit(TRANSPARENT, sk);
1571 goto copyval;
1572 case IP_NODEFRAG:
1573 val = inet_test_bit(NODEFRAG, sk);
1574 goto copyval;
1575 case IP_BIND_ADDRESS_NO_PORT:
1576 val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
1577 goto copyval;
1578 case IP_TTL:
1579 val = READ_ONCE(inet->uc_ttl);
1580 if (val < 0)
1581 val = READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_default_ttl);
1582 goto copyval;
1583 case IP_MINTTL:
1584 val = READ_ONCE(inet->min_ttl);
1585 goto copyval;
1586 case IP_MULTICAST_TTL:
1587 val = READ_ONCE(inet->mc_ttl);
1588 goto copyval;
1589 case IP_MTU_DISCOVER:
1590 val = READ_ONCE(inet->pmtudisc);
1591 goto copyval;
1592 case IP_TOS:
1593 val = READ_ONCE(inet->tos);
1594 goto copyval;
1595 case IP_OPTIONS:
1596 {
1597 unsigned char optbuf[sizeof(struct ip_options)+40];
1598 struct ip_options *opt = (struct ip_options *)optbuf;
1599 struct ip_options_rcu *inet_opt;
1600
1601 rcu_read_lock();
1602 inet_opt = rcu_dereference(inet->inet_opt);
1603 opt->optlen = 0;
1604 if (inet_opt)
1605 memcpy(optbuf, &inet_opt->opt,
1606 sizeof(struct ip_options) +
1607 inet_opt->opt.optlen);
1608 rcu_read_unlock();
1609
1610 if (opt->optlen == 0) {
1611 len = 0;
1612 return copy_to_sockptr(optlen, &len, sizeof(int));
1613 }
1614
1615 ip_options_undo(opt);
1616
1617 len = min_t(unsigned int, len, opt->optlen);
1618 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1619 return -EFAULT;
1620 if (copy_to_sockptr(optval, opt->__data, len))
1621 return -EFAULT;
1622 return 0;
1623 }
1624 case IP_MTU:
1625 {
1626 struct dst_entry *dst;
1627 val = 0;
1628 dst = sk_dst_get(sk);
1629 if (dst) {
1630 val = dst_mtu(dst);
1631 dst_release(dst);
1632 }
1633 if (!val)
1634 return -ENOTCONN;
1635 goto copyval;
1636 }
1637 case IP_PKTOPTIONS:
1638 {
1639 struct msghdr msg;
1640
1641 if (sk->sk_type != SOCK_STREAM)
1642 return -ENOPROTOOPT;
1643
1644 if (optval.is_kernel) {
1645 msg.msg_control_is_user = false;
1646 msg.msg_control = optval.kernel;
1647 } else {
1648 msg.msg_control_is_user = true;
1649 msg.msg_control_user = optval.user;
1650 }
1651 msg.msg_controllen = len;
1652 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1653
1654 if (inet_test_bit(PKTINFO, sk)) {
1655 struct in_pktinfo info;
1656
1657 info.ipi_addr.s_addr = READ_ONCE(inet->inet_rcv_saddr);
1658 info.ipi_spec_dst.s_addr = READ_ONCE(inet->inet_rcv_saddr);
1659 info.ipi_ifindex = READ_ONCE(inet->mc_index);
1660 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1661 }
1662 if (inet_test_bit(TTL, sk)) {
1663 int hlim = READ_ONCE(inet->mc_ttl);
1664
1665 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1666 }
1667 if (inet_test_bit(TOS, sk)) {
1668 int tos = READ_ONCE(inet->rcv_tos);
1669 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1670 }
1671 len -= msg.msg_controllen;
1672 return copy_to_sockptr(optlen, &len, sizeof(int));
1673 }
1674 case IP_UNICAST_IF:
1675 val = (__force int)htonl((__u32) READ_ONCE(inet->uc_index));
1676 goto copyval;
1677 case IP_MULTICAST_IF:
1678 {
1679 struct in_addr addr;
1680 len = min_t(unsigned int, len, sizeof(struct in_addr));
1681 addr.s_addr = READ_ONCE(inet->mc_addr);
1682
1683 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1684 return -EFAULT;
1685 if (copy_to_sockptr(optval, &addr, len))
1686 return -EFAULT;
1687 return 0;
1688 }
1689 case IP_LOCAL_PORT_RANGE:
1690 val = READ_ONCE(inet->local_port_range);
1691 goto copyval;
1692 }
1693
1694 if (needs_rtnl)
1695 rtnl_lock();
1696 sockopt_lock_sock(sk);
1697
1698 switch (optname) {
1699 case IP_MSFILTER:
1700 {
1701 struct ip_msfilter msf;
1702
1703 if (len < IP_MSFILTER_SIZE(0)) {
1704 err = -EINVAL;
1705 goto out;
1706 }
1707 if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) {
1708 err = -EFAULT;
1709 goto out;
1710 }
1711 err = ip_mc_msfget(sk, &msf, optval, optlen);
1712 goto out;
1713 }
1714 case MCAST_MSFILTER:
1715 if (in_compat_syscall())
1716 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1717 len);
1718 else
1719 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1720 goto out;
1721 case IP_PROTOCOL:
1722 val = inet_sk(sk)->inet_num;
1723 break;
1724 default:
1725 sockopt_release_sock(sk);
1726 return -ENOPROTOOPT;
1727 }
1728 sockopt_release_sock(sk);
1729copyval:
1730 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1731 unsigned char ucval = (unsigned char)val;
1732 len = 1;
1733 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1734 return -EFAULT;
1735 if (copy_to_sockptr(optval, &ucval, 1))
1736 return -EFAULT;
1737 } else {
1738 len = min_t(unsigned int, sizeof(int), len);
1739 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1740 return -EFAULT;
1741 if (copy_to_sockptr(optval, &val, len))
1742 return -EFAULT;
1743 }
1744 return 0;
1745
1746out:
1747 sockopt_release_sock(sk);
1748 if (needs_rtnl)
1749 rtnl_unlock();
1750 return err;
1751}
1752
1753int ip_getsockopt(struct sock *sk, int level,
1754 int optname, char __user *optval, int __user *optlen)
1755{
1756 int err;
1757
1758 err = do_ip_getsockopt(sk, level, optname,
1759 USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
1760
1761#ifdef CONFIG_NETFILTER
1762 /* we need to exclude all possible ENOPROTOOPTs except the default case */
1763 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1764 !ip_mroute_opt(optname)) {
1765 int len;
1766
1767 if (get_user(len, optlen))
1768 return -EFAULT;
1769
1770 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1771 if (err >= 0)
1772 err = put_user(len, optlen);
1773 return err;
1774 }
1775#endif
1776 return err;
1777}
1778EXPORT_SYMBOL(ip_getsockopt);
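/*
 * Editor's illustration (not part of this file): a hedged userspace sketch of
 * reading the cached path MTU via the IP_MTU case handled in
 * do_ip_getsockopt(); the destination address/port are placeholders and the
 * call fails with ENOTCONN on an unconnected socket.
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(9999),
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
 *	};
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	if (getsockopt(fd, SOL_IP, IP_MTU, &mtu, &len) == 0)
 *		printf("path mtu: %d\n", mtu);
 */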