1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Multicast support for IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10 */
11
12/* Changes:
13 *
14 * yoshfuji : fix format of router-alert option
15 * YOSHIFUJI Hideaki @USAGI:
16 * Fixed source address for MLD message based on
17 * <draft-ietf-magma-mld-source-05.txt>.
18 * YOSHIFUJI Hideaki @USAGI:
19 * - Ignore Queries for invalid addresses.
20 * - MLD for link-local addresses.
21 * David L Stevens <dlstevens@us.ibm.com>:
22 * - MLDv2 support
23 */
24
25#include <linux/module.h>
26#include <linux/errno.h>
27#include <linux/types.h>
28#include <linux/string.h>
29#include <linux/socket.h>
30#include <linux/sockios.h>
31#include <linux/jiffies.h>
32#include <linux/times.h>
33#include <linux/net.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/if_arp.h>
38#include <linux/route.h>
39#include <linux/init.h>
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <linux/slab.h>
43#include <linux/pkt_sched.h>
44#include <net/mld.h>
45
46#include <linux/netfilter.h>
47#include <linux/netfilter_ipv6.h>
48
49#include <net/net_namespace.h>
50#include <net/sock.h>
51#include <net/snmp.h>
52
53#include <net/ipv6.h>
54#include <net/protocol.h>
55#include <net/if_inet6.h>
56#include <net/ndisc.h>
57#include <net/addrconf.h>
58#include <net/ip6_route.h>
59#include <net/inet_common.h>
60
61#include <net/ip6_checksum.h>
62
63/* Ensure that we have struct in6_addr aligned on 32bit word. */
64static int __mld2_query_bugs[] __attribute__((__unused__)) = {
65 BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
66 BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
67 BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
68};
69
70static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
71
72static void igmp6_join_group(struct ifmcaddr6 *ma);
73static void igmp6_leave_group(struct ifmcaddr6 *ma);
74static void igmp6_timer_handler(struct timer_list *t);
75
76static void mld_gq_timer_expire(struct timer_list *t);
77static void mld_ifc_timer_expire(struct timer_list *t);
78static void mld_ifc_event(struct inet6_dev *idev);
79static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
80static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
81static void mld_clear_delrec(struct inet6_dev *idev);
82static bool mld_in_v1_mode(const struct inet6_dev *idev);
83static int sf_setstate(struct ifmcaddr6 *pmc);
84static void sf_markstate(struct ifmcaddr6 *pmc);
85static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
86static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
87 int sfmode, int sfcount, const struct in6_addr *psfsrc,
88 int delta);
89static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
90 int sfmode, int sfcount, const struct in6_addr *psfsrc,
91 int delta);
92static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
93 struct inet6_dev *idev);
94static int __ipv6_dev_mc_inc(struct net_device *dev,
95 const struct in6_addr *addr, unsigned int mode);
96
97#define MLD_QRV_DEFAULT 2
98/* RFC3810, 9.2. Query Interval */
99#define MLD_QI_DEFAULT (125 * HZ)
100/* RFC3810, 9.3. Query Response Interval */
101#define MLD_QRI_DEFAULT (10 * HZ)
102
103/* RFC3810, 8.1 Query Version Distinctions */
104#define MLD_V1_QUERY_LEN 24
105#define MLD_V2_QUERY_LEN_MIN 28
106
107#define IPV6_MLD_MAX_MSF 64
108
109int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
110int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
111
112/*
113 * socket join on multicast group
114 */
115
116#define for_each_pmc_rcu(np, pmc) \
117 for (pmc = rcu_dereference(np->ipv6_mc_list); \
118 pmc != NULL; \
119 pmc = rcu_dereference(pmc->next))
120
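/* Return the unsolicited report interval for the interface's current
 * MLD mode, in jiffies; never less than one jiffy.
 */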
121static int unsolicited_report_interval(struct inet6_dev *idev)
122{
123 int iv;
124
125 if (mld_in_v1_mode(idev))
126 iv = idev->cnf.mldv1_unsolicited_report_interval;
127 else
128 iv = idev->cnf.mldv2_unsolicited_report_interval;
129
130 return iv > 0 ? iv : 1;
131}
132
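/* Common join helper: resolve the device (by ifindex, or via a route
 * lookup when ifindex is 0), link a new ipv6_mc_socklist onto the
 * socket and add/increase the membership on the device. Caller holds RTNL.
 */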
133static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
134 const struct in6_addr *addr, unsigned int mode)
135{
136 struct net_device *dev = NULL;
137 struct ipv6_mc_socklist *mc_lst;
138 struct ipv6_pinfo *np = inet6_sk(sk);
139 struct net *net = sock_net(sk);
140 int err;
141
142 ASSERT_RTNL();
143
144 if (!ipv6_addr_is_multicast(addr))
145 return -EINVAL;
146
147 rcu_read_lock();
148 for_each_pmc_rcu(np, mc_lst) {
149 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
150 ipv6_addr_equal(&mc_lst->addr, addr)) {
151 rcu_read_unlock();
152 return -EADDRINUSE;
153 }
154 }
155 rcu_read_unlock();
156
157 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
158
159 if (!mc_lst)
160 return -ENOMEM;
161
162 mc_lst->next = NULL;
163 mc_lst->addr = *addr;
164
165 if (ifindex == 0) {
166 struct rt6_info *rt;
167 rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
168 if (rt) {
169 dev = rt->dst.dev;
170 ip6_rt_put(rt);
171 }
172 } else
173 dev = __dev_get_by_index(net, ifindex);
174
175 if (!dev) {
176 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
177 return -ENODEV;
178 }
179
180 mc_lst->ifindex = dev->ifindex;
181 mc_lst->sfmode = mode;
182 rwlock_init(&mc_lst->sflock);
183 mc_lst->sflist = NULL;
184
185 /*
186 * now add/increase the group membership on the device
187 */
188
189 err = __ipv6_dev_mc_inc(dev, addr, mode);
190
191 if (err) {
192 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
193 return err;
194 }
195
196 mc_lst->next = np->ipv6_mc_list;
197 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
198
199 return 0;
200}
201
202int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
203{
204 return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
205}
206EXPORT_SYMBOL(ipv6_sock_mc_join);
207
208int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
209 const struct in6_addr *addr, unsigned int mode)
210{
211 return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
212}
213
214/*
215 * socket leave on multicast group
216 */
217int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
218{
219 struct ipv6_pinfo *np = inet6_sk(sk);
220 struct ipv6_mc_socklist *mc_lst;
221 struct ipv6_mc_socklist __rcu **lnk;
222 struct net *net = sock_net(sk);
223
224 ASSERT_RTNL();
225
226 if (!ipv6_addr_is_multicast(addr))
227 return -EINVAL;
228
229 for (lnk = &np->ipv6_mc_list;
230 (mc_lst = rtnl_dereference(*lnk)) != NULL;
231 lnk = &mc_lst->next) {
232 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
233 ipv6_addr_equal(&mc_lst->addr, addr)) {
234 struct net_device *dev;
235
236 *lnk = mc_lst->next;
237
238 dev = __dev_get_by_index(net, mc_lst->ifindex);
239 if (dev) {
240 struct inet6_dev *idev = __in6_dev_get(dev);
241
242 (void) ip6_mc_leave_src(sk, mc_lst, idev);
243 if (idev)
244 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
245 } else
246 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
247
248 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
249 kfree_rcu(mc_lst, rcu);
250 return 0;
251 }
252 }
253
254 return -EADDRNOTAVAIL;
255}
256EXPORT_SYMBOL(ipv6_sock_mc_drop);
257
258/* called with rcu_read_lock() */
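/* On success the returned idev has idev->lock held for reading;
 * the caller must release it with read_unlock_bh().
 */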
259static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
260 const struct in6_addr *group,
261 int ifindex)
262{
263 struct net_device *dev = NULL;
264 struct inet6_dev *idev = NULL;
265
266 if (ifindex == 0) {
267 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
268
269 if (rt) {
270 dev = rt->dst.dev;
271 ip6_rt_put(rt);
272 }
273 } else
274 dev = dev_get_by_index_rcu(net, ifindex);
275
276 if (!dev)
277 return NULL;
278 idev = __in6_dev_get(dev);
279 if (!idev)
280 return NULL;
281 read_lock_bh(&idev->lock);
282 if (idev->dead) {
283 read_unlock_bh(&idev->lock);
284 return NULL;
285 }
286 return idev;
287}
288
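/* Drop every multicast membership still held by the socket. Caller holds RTNL. */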
289void __ipv6_sock_mc_close(struct sock *sk)
290{
291 struct ipv6_pinfo *np = inet6_sk(sk);
292 struct ipv6_mc_socklist *mc_lst;
293 struct net *net = sock_net(sk);
294
295 ASSERT_RTNL();
296
297 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
298 struct net_device *dev;
299
300 np->ipv6_mc_list = mc_lst->next;
301
302 dev = __dev_get_by_index(net, mc_lst->ifindex);
303 if (dev) {
304 struct inet6_dev *idev = __in6_dev_get(dev);
305
306 (void) ip6_mc_leave_src(sk, mc_lst, idev);
307 if (idev)
308 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
309 } else
310 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
311
312 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
313 kfree_rcu(mc_lst, rcu);
314 }
315}
316
317void ipv6_sock_mc_close(struct sock *sk)
318{
319 struct ipv6_pinfo *np = inet6_sk(sk);
320
321 if (!rcu_access_pointer(np->ipv6_mc_list))
322 return;
323 rtnl_lock();
324 __ipv6_sock_mc_close(sk);
325 rtnl_unlock();
326}
327
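/* Add (add != 0) or delete one source address in the socket's filter for
 * a group and mirror the change in the interface filter. Deleting the
 * last INCLUDE source is treated as leaving the group.
 */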
328int ip6_mc_source(int add, int omode, struct sock *sk,
329 struct group_source_req *pgsr)
330{
331 struct in6_addr *source, *group;
332 struct ipv6_mc_socklist *pmc;
333 struct inet6_dev *idev;
334 struct ipv6_pinfo *inet6 = inet6_sk(sk);
335 struct ip6_sf_socklist *psl;
336 struct net *net = sock_net(sk);
337 int i, j, rv;
338 int leavegroup = 0;
339 int pmclocked = 0;
340 int err;
341
342 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
343 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
344
345 if (!ipv6_addr_is_multicast(group))
346 return -EINVAL;
347
348 rcu_read_lock();
349 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
350 if (!idev) {
351 rcu_read_unlock();
352 return -ENODEV;
353 }
354
355 err = -EADDRNOTAVAIL;
356
357 for_each_pmc_rcu(inet6, pmc) {
358 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
359 continue;
360 if (ipv6_addr_equal(&pmc->addr, group))
361 break;
362 }
363 if (!pmc) { /* must have a prior join */
364 err = -EINVAL;
365 goto done;
366 }
367 /* if a source filter was set, must be the same mode as before */
368 if (pmc->sflist) {
369 if (pmc->sfmode != omode) {
370 err = -EINVAL;
371 goto done;
372 }
373 } else if (pmc->sfmode != omode) {
374 /* allow mode switches for empty-set filters */
375 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
376 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
377 pmc->sfmode = omode;
378 }
379
380 write_lock(&pmc->sflock);
381 pmclocked = 1;
382
383 psl = pmc->sflist;
384 if (!add) {
385 if (!psl)
386 goto done; /* err = -EADDRNOTAVAIL */
387 rv = !0;
388 for (i = 0; i < psl->sl_count; i++) {
389 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
390 if (rv == 0)
391 break;
392 }
393 if (rv) /* source not found */
394 goto done; /* err = -EADDRNOTAVAIL */
395
396 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
397 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
398 leavegroup = 1;
399 goto done;
400 }
401
402 /* update the interface filter */
403 ip6_mc_del_src(idev, group, omode, 1, source, 1);
404
405 for (j = i+1; j < psl->sl_count; j++)
406 psl->sl_addr[j-1] = psl->sl_addr[j];
407 psl->sl_count--;
408 err = 0;
409 goto done;
410 }
411 /* else, add a new source to the filter */
412
413 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
414 err = -ENOBUFS;
415 goto done;
416 }
417 if (!psl || psl->sl_count == psl->sl_max) {
418 struct ip6_sf_socklist *newpsl;
419 int count = IP6_SFBLOCK;
420
421 if (psl)
422 count += psl->sl_max;
423 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
424 if (!newpsl) {
425 err = -ENOBUFS;
426 goto done;
427 }
428 newpsl->sl_max = count;
429 newpsl->sl_count = count - IP6_SFBLOCK;
430 if (psl) {
431 for (i = 0; i < psl->sl_count; i++)
432 newpsl->sl_addr[i] = psl->sl_addr[i];
433 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
434 }
435 pmc->sflist = psl = newpsl;
436 }
437 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
438 for (i = 0; i < psl->sl_count; i++) {
439 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
440 if (rv == 0) /* address already in the list: error */
441 goto done;
442 }
443 for (j = psl->sl_count-1; j >= i; j--)
444 psl->sl_addr[j+1] = psl->sl_addr[j];
445 psl->sl_addr[i] = *source;
446 psl->sl_count++;
447 err = 0;
448 /* update the interface list */
449 ip6_mc_add_src(idev, group, omode, 1, source, 1);
450done:
451 if (pmclocked)
452 write_unlock(&pmc->sflock);
453 read_unlock_bh(&idev->lock);
454 rcu_read_unlock();
455 if (leavegroup)
456 err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
457 return err;
458}
459
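/* Replace the socket's entire source filter for a group (MCAST_MSFILTER):
 * install the new source list and mode on the interface, then swap the
 * socket's list. An empty INCLUDE filter means leave the group.
 */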
460int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
461 struct sockaddr_storage *list)
462{
463 const struct in6_addr *group;
464 struct ipv6_mc_socklist *pmc;
465 struct inet6_dev *idev;
466 struct ipv6_pinfo *inet6 = inet6_sk(sk);
467 struct ip6_sf_socklist *newpsl, *psl;
468 struct net *net = sock_net(sk);
469 int leavegroup = 0;
470 int i, err;
471
472 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
473
474 if (!ipv6_addr_is_multicast(group))
475 return -EINVAL;
476 if (gsf->gf_fmode != MCAST_INCLUDE &&
477 gsf->gf_fmode != MCAST_EXCLUDE)
478 return -EINVAL;
479
480 rcu_read_lock();
481 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
482
483 if (!idev) {
484 rcu_read_unlock();
485 return -ENODEV;
486 }
487
488 err = 0;
489
490 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
491 leavegroup = 1;
492 goto done;
493 }
494
495 for_each_pmc_rcu(inet6, pmc) {
496 if (pmc->ifindex != gsf->gf_interface)
497 continue;
498 if (ipv6_addr_equal(&pmc->addr, group))
499 break;
500 }
501 if (!pmc) { /* must have a prior join */
502 err = -EINVAL;
503 goto done;
504 }
505 if (gsf->gf_numsrc) {
506 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
507 GFP_ATOMIC);
508 if (!newpsl) {
509 err = -ENOBUFS;
510 goto done;
511 }
512 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
513 for (i = 0; i < newpsl->sl_count; ++i, ++list) {
514 struct sockaddr_in6 *psin6;
515
516 psin6 = (struct sockaddr_in6 *)list;
517 newpsl->sl_addr[i] = psin6->sin6_addr;
518 }
519 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
520 newpsl->sl_count, newpsl->sl_addr, 0);
521 if (err) {
522 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
523 goto done;
524 }
525 } else {
526 newpsl = NULL;
527 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
528 }
529
530 write_lock(&pmc->sflock);
531 psl = pmc->sflist;
532 if (psl) {
533 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
534 psl->sl_count, psl->sl_addr, 0);
535 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
536 } else
537 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
538 pmc->sflist = newpsl;
539 pmc->sfmode = gsf->gf_fmode;
540 write_unlock(&pmc->sflock);
541 err = 0;
542done:
543 read_unlock_bh(&idev->lock);
544 rcu_read_unlock();
545 if (leavegroup)
546 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
547 return err;
548}
549
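/* Copy the socket's source filter for a group to user space, truncated to
 * the caller-supplied gf_numsrc; gf_numsrc is updated to the full count.
 */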
550int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
551 struct sockaddr_storage *p)
552{
553 int err, i, count, copycount;
554 const struct in6_addr *group;
555 struct ipv6_mc_socklist *pmc;
556 struct inet6_dev *idev;
557 struct ipv6_pinfo *inet6 = inet6_sk(sk);
558 struct ip6_sf_socklist *psl;
559 struct net *net = sock_net(sk);
560
561 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
562
563 if (!ipv6_addr_is_multicast(group))
564 return -EINVAL;
565
566 rcu_read_lock();
567 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
568
569 if (!idev) {
570 rcu_read_unlock();
571 return -ENODEV;
572 }
573
574 err = -EADDRNOTAVAIL;
575 /* changes to the ipv6_mc_list require the socket lock and
576 * rtnl lock. We have the socket lock and rcu read lock,
577 * so reading the list is safe.
578 */
579
580 for_each_pmc_rcu(inet6, pmc) {
581 if (pmc->ifindex != gsf->gf_interface)
582 continue;
583 if (ipv6_addr_equal(group, &pmc->addr))
584 break;
585 }
586 if (!pmc) /* must have a prior join */
587 goto done;
588 gsf->gf_fmode = pmc->sfmode;
589 psl = pmc->sflist;
590 count = psl ? psl->sl_count : 0;
591 read_unlock_bh(&idev->lock);
592 rcu_read_unlock();
593
594 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
595 gsf->gf_numsrc = count;
596 /* changes to psl require the socket lock, and a write lock
597 * on pmc->sflock. We have the socket lock so reading here is safe.
598 */
599 for (i = 0; i < copycount; i++, p++) {
600 struct sockaddr_in6 *psin6;
601 struct sockaddr_storage ss;
602
603 psin6 = (struct sockaddr_in6 *)&ss;
604 memset(&ss, 0, sizeof(ss));
605 psin6->sin6_family = AF_INET6;
606 psin6->sin6_addr = psl->sl_addr[i];
607 if (copy_to_user(p, &ss, sizeof(ss)))
608 return -EFAULT;
609 }
610 return 0;
611done:
612 read_unlock_bh(&idev->lock);
613 rcu_read_unlock();
614 return err;
615}
616
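/* Return true if a packet sent to mc_addr from src_addr passes this
 * socket's multicast memberships and per-group source filters.
 */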
617bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
618 const struct in6_addr *src_addr)
619{
620 struct ipv6_pinfo *np = inet6_sk(sk);
621 struct ipv6_mc_socklist *mc;
622 struct ip6_sf_socklist *psl;
623 bool rv = true;
624
625 rcu_read_lock();
626 for_each_pmc_rcu(np, mc) {
627 if (ipv6_addr_equal(&mc->addr, mc_addr))
628 break;
629 }
630 if (!mc) {
631 rcu_read_unlock();
632 return np->mc_all;
633 }
634 read_lock(&mc->sflock);
635 psl = mc->sflist;
636 if (!psl) {
637 rv = mc->sfmode == MCAST_EXCLUDE;
638 } else {
639 int i;
640
641 for (i = 0; i < psl->sl_count; i++) {
642 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
643 break;
644 }
645 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
646 rv = false;
647 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
648 rv = false;
649 }
650 read_unlock(&mc->sflock);
651 rcu_read_unlock();
652
653 return rv;
654}
655
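/* Program the group's link-layer address into the device and announce the
 * new membership (an MLDv1 report, or MLDv2 change records).
 */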
656static void igmp6_group_added(struct ifmcaddr6 *mc)
657{
658 struct net_device *dev = mc->idev->dev;
659 char buf[MAX_ADDR_LEN];
660
661 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
662 IPV6_ADDR_SCOPE_LINKLOCAL)
663 return;
664
665 spin_lock_bh(&mc->mca_lock);
666 if (!(mc->mca_flags&MAF_LOADED)) {
667 mc->mca_flags |= MAF_LOADED;
668 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
669 dev_mc_add(dev, buf);
670 }
671 spin_unlock_bh(&mc->mca_lock);
672
673 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
674 return;
675
676 if (mld_in_v1_mode(mc->idev)) {
677 igmp6_join_group(mc);
678 return;
679 }
680 /* else v2 */
681
682 /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
683 * should not send filter-mode change record as the mode
684 * should be from IN() to IN(A).
685 */
686 if (mc->mca_sfmode == MCAST_EXCLUDE)
687 mc->mca_crcount = mc->idev->mc_qrv;
688
689 mld_ifc_event(mc->idev);
690}
691
692static void igmp6_group_dropped(struct ifmcaddr6 *mc)
693{
694 struct net_device *dev = mc->idev->dev;
695 char buf[MAX_ADDR_LEN];
696
697 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
698 IPV6_ADDR_SCOPE_LINKLOCAL)
699 return;
700
701 spin_lock_bh(&mc->mca_lock);
702 if (mc->mca_flags&MAF_LOADED) {
703 mc->mca_flags &= ~MAF_LOADED;
704 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
705 dev_mc_del(dev, buf);
706 }
707
708 spin_unlock_bh(&mc->mca_lock);
709 if (mc->mca_flags & MAF_NOREPORT)
710 return;
711
712 if (!mc->idev->dead)
713 igmp6_leave_group(mc);
714
715 spin_lock_bh(&mc->mca_lock);
716 if (del_timer(&mc->mca_timer))
717 refcount_dec(&mc->mca_refcnt);
718 spin_unlock_bh(&mc->mca_lock);
719}
720
721/*
722 * deleted ifmcaddr6 manipulation
723 */
724static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
725{
726 struct ifmcaddr6 *pmc;
727
728 /* this is an "ifmcaddr6" for convenience; only the fields below
729 * are actually used. In particular, the refcnt and users are not
730 * used for management of the delete list. Using the same structure
731 * for deleted items allows change reports to use common code with
732 * non-deleted or query-response MCA's.
733 */
734 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
735 if (!pmc)
736 return;
737
738 spin_lock_bh(&im->mca_lock);
739 spin_lock_init(&pmc->mca_lock);
740 pmc->idev = im->idev;
741 in6_dev_hold(idev);
742 pmc->mca_addr = im->mca_addr;
743 pmc->mca_crcount = idev->mc_qrv;
744 pmc->mca_sfmode = im->mca_sfmode;
745 if (pmc->mca_sfmode == MCAST_INCLUDE) {
746 struct ip6_sf_list *psf;
747
748 pmc->mca_tomb = im->mca_tomb;
749 pmc->mca_sources = im->mca_sources;
750 im->mca_tomb = im->mca_sources = NULL;
751 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
752 psf->sf_crcount = pmc->mca_crcount;
753 }
754 spin_unlock_bh(&im->mca_lock);
755
756 spin_lock_bh(&idev->mc_lock);
757 pmc->next = idev->mc_tomb;
758 idev->mc_tomb = pmc;
759 spin_unlock_bh(&idev->mc_lock);
760}
761
762static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
763{
764 struct ifmcaddr6 *pmc, *pmc_prev;
765 struct ip6_sf_list *psf;
766 struct in6_addr *pmca = &im->mca_addr;
767
768 spin_lock_bh(&idev->mc_lock);
769 pmc_prev = NULL;
770 for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
771 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
772 break;
773 pmc_prev = pmc;
774 }
775 if (pmc) {
776 if (pmc_prev)
777 pmc_prev->next = pmc->next;
778 else
779 idev->mc_tomb = pmc->next;
780 }
781 spin_unlock_bh(&idev->mc_lock);
782
783 spin_lock_bh(&im->mca_lock);
784 if (pmc) {
785 im->idev = pmc->idev;
786 if (im->mca_sfmode == MCAST_INCLUDE) {
787 swap(im->mca_tomb, pmc->mca_tomb);
788 swap(im->mca_sources, pmc->mca_sources);
789 for (psf = im->mca_sources; psf; psf = psf->sf_next)
790 psf->sf_crcount = idev->mc_qrv;
791 } else {
792 im->mca_crcount = idev->mc_qrv;
793 }
794 in6_dev_put(pmc->idev);
795 ip6_mc_clear_src(pmc);
796 kfree(pmc);
797 }
798 spin_unlock_bh(&im->mca_lock);
799}
800
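/* Throw away all pending change-report state: free the tomb list and any
 * tombstoned sources kept on live entries.
 */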
801static void mld_clear_delrec(struct inet6_dev *idev)
802{
803 struct ifmcaddr6 *pmc, *nextpmc;
804
805 spin_lock_bh(&idev->mc_lock);
806 pmc = idev->mc_tomb;
807 idev->mc_tomb = NULL;
808 spin_unlock_bh(&idev->mc_lock);
809
810 for (; pmc; pmc = nextpmc) {
811 nextpmc = pmc->next;
812 ip6_mc_clear_src(pmc);
813 in6_dev_put(pmc->idev);
814 kfree(pmc);
815 }
816
817 /* clear dead sources, too */
818 read_lock_bh(&idev->lock);
819 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
820 struct ip6_sf_list *psf, *psf_next;
821
822 spin_lock_bh(&pmc->mca_lock);
823 psf = pmc->mca_tomb;
824 pmc->mca_tomb = NULL;
825 spin_unlock_bh(&pmc->mca_lock);
826 for (; psf; psf = psf_next) {
827 psf_next = psf->sf_next;
828 kfree(psf);
829 }
830 }
831 read_unlock_bh(&idev->lock);
832}
833
834static void mca_get(struct ifmcaddr6 *mc)
835{
836 refcount_inc(&mc->mca_refcnt);
837}
838
839static void ma_put(struct ifmcaddr6 *mc)
840{
841 if (refcount_dec_and_test(&mc->mca_refcnt)) {
842 in6_dev_put(mc->idev);
843 kfree(mc);
844 }
845}
846
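/* Allocate a new interface multicast entry for addr in the given filter
 * mode; the idev reference taken by the caller is kept by the entry and
 * dropped in ma_put().
 */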
847static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
848 const struct in6_addr *addr,
849 unsigned int mode)
850{
851 struct ifmcaddr6 *mc;
852
853 mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
854 if (!mc)
855 return NULL;
856
857 timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
858
859 mc->mca_addr = *addr;
860 mc->idev = idev; /* reference taken by caller */
861 mc->mca_users = 1;
862 /* mca_stamp should be updated upon changes */
863 mc->mca_cstamp = mc->mca_tstamp = jiffies;
864 refcount_set(&mc->mca_refcnt, 1);
865 spin_lock_init(&mc->mca_lock);
866
867 mc->mca_sfmode = mode;
868 mc->mca_sfcount[mode] = 1;
869
870 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
871 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
872 mc->mca_flags |= MAF_NOREPORT;
873
874 return mc;
875}
876
877/*
878 * device multicast group inc (add if not found)
879 */
880static int __ipv6_dev_mc_inc(struct net_device *dev,
881 const struct in6_addr *addr, unsigned int mode)
882{
883 struct ifmcaddr6 *mc;
884 struct inet6_dev *idev;
885
886 ASSERT_RTNL();
887
888 /* we need to take a reference on idev */
889 idev = in6_dev_get(dev);
890
891 if (!idev)
892 return -EINVAL;
893
894 write_lock_bh(&idev->lock);
895 if (idev->dead) {
896 write_unlock_bh(&idev->lock);
897 in6_dev_put(idev);
898 return -ENODEV;
899 }
900
901 for (mc = idev->mc_list; mc; mc = mc->next) {
902 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
903 mc->mca_users++;
904 write_unlock_bh(&idev->lock);
905 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
906 in6_dev_put(idev);
907 return 0;
908 }
909 }
910
911 mc = mca_alloc(idev, addr, mode);
912 if (!mc) {
913 write_unlock_bh(&idev->lock);
914 in6_dev_put(idev);
915 return -ENOMEM;
916 }
917
918 mc->next = idev->mc_list;
919 idev->mc_list = mc;
920
921 /* Hold this for the code below before we unlock,
922 * it is already exposed via idev->mc_list.
923 */
924 mca_get(mc);
925 write_unlock_bh(&idev->lock);
926
927 mld_del_delrec(idev, mc);
928 igmp6_group_added(mc);
929 ma_put(mc);
930 return 0;
931}
932
933int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
934{
935 return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
936}
937EXPORT_SYMBOL(ipv6_dev_mc_inc);
938
939/*
940 * device multicast group del
941 */
942int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
943{
944 struct ifmcaddr6 *ma, **map;
945
946 ASSERT_RTNL();
947
948 write_lock_bh(&idev->lock);
949 for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
950 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
951 if (--ma->mca_users == 0) {
952 *map = ma->next;
953 write_unlock_bh(&idev->lock);
954
955 igmp6_group_dropped(ma);
956 ip6_mc_clear_src(ma);
957
958 ma_put(ma);
959 return 0;
960 }
961 write_unlock_bh(&idev->lock);
962 return 0;
963 }
964 }
965 write_unlock_bh(&idev->lock);
966
967 return -ENOENT;
968}
969
970int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
971{
972 struct inet6_dev *idev;
973 int err;
974
975 ASSERT_RTNL();
976
977 idev = __in6_dev_get(dev);
978 if (!idev)
979 err = -ENODEV;
980 else
981 err = __ipv6_dev_mc_dec(idev, addr);
982
983 return err;
984}
985EXPORT_SYMBOL(ipv6_dev_mc_dec);
986
987/*
988 * check if the interface/address pair is valid
989 */
990bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
991 const struct in6_addr *src_addr)
992{
993 struct inet6_dev *idev;
994 struct ifmcaddr6 *mc;
995 bool rv = false;
996
997 rcu_read_lock();
998 idev = __in6_dev_get(dev);
999 if (idev) {
1000 read_lock_bh(&idev->lock);
1001 for (mc = idev->mc_list; mc; mc = mc->next) {
1002 if (ipv6_addr_equal(&mc->mca_addr, group))
1003 break;
1004 }
1005 if (mc) {
1006 if (src_addr && !ipv6_addr_any(src_addr)) {
1007 struct ip6_sf_list *psf;
1008
1009 spin_lock_bh(&mc->mca_lock);
1010 for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
1011 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1012 break;
1013 }
1014 if (psf)
1015 rv = psf->sf_count[MCAST_INCLUDE] ||
1016 psf->sf_count[MCAST_EXCLUDE] !=
1017 mc->mca_sfcount[MCAST_EXCLUDE];
1018 else
1019 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1020 spin_unlock_bh(&mc->mca_lock);
1021 } else
1022 rv = true; /* don't filter unspecified source */
1023 }
1024 read_unlock_bh(&idev->lock);
1025 }
1026 rcu_read_unlock();
1027 return rv;
1028}
1029
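/* Arm the general query response timer with a random delay of at most
 * mc_maxdelay jiffies, holding a reference on idev while it is pending.
 */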
1030static void mld_gq_start_timer(struct inet6_dev *idev)
1031{
1032 unsigned long tv = prandom_u32() % idev->mc_maxdelay;
1033
1034 idev->mc_gq_running = 1;
1035 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1036 in6_dev_hold(idev);
1037}
1038
1039static void mld_gq_stop_timer(struct inet6_dev *idev)
1040{
1041 idev->mc_gq_running = 0;
1042 if (del_timer(&idev->mc_gq_timer))
1043 __in6_dev_put(idev);
1044}
1045
1046static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1047{
1048 unsigned long tv = prandom_u32() % delay;
1049
1050 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1051 in6_dev_hold(idev);
1052}
1053
1054static void mld_ifc_stop_timer(struct inet6_dev *idev)
1055{
1056 idev->mc_ifc_count = 0;
1057 if (del_timer(&idev->mc_ifc_timer))
1058 __in6_dev_put(idev);
1059}
1060
1061static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1062{
1063 unsigned long tv = prandom_u32() % delay;
1064
1065 if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1066 in6_dev_hold(idev);
1067}
1068
1069static void mld_dad_stop_timer(struct inet6_dev *idev)
1070{
1071 if (del_timer(&idev->mc_dad_timer))
1072 __in6_dev_put(idev);
1073}
1074
1075/*
1076 * IGMP handling (alias multicast ICMPv6 messages)
1077 */
1078
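/* Schedule a report for a queried group: pick a random delay within the
 * query's maximum response time, keeping an already pending, earlier timer.
 */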
1079static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1080{
1081 unsigned long delay = resptime;
1082
1083 /* Do not start timer for these addresses */
1084 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1085 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1086 return;
1087
1088 if (del_timer(&ma->mca_timer)) {
1089 refcount_dec(&ma->mca_refcnt);
1090 delay = ma->mca_timer.expires - jiffies;
1091 }
1092
1093 if (delay >= resptime)
1094 delay = prandom_u32() % resptime;
1095
1096 ma->mca_timer.expires = jiffies + delay;
1097 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1098 refcount_inc(&ma->mca_refcnt);
1099 ma->mca_flags |= MAF_TIMER_RUNNING;
1100}
1101
1102/* mark EXCLUDE-mode sources */
1103static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1104 const struct in6_addr *srcs)
1105{
1106 struct ip6_sf_list *psf;
1107 int i, scount;
1108
1109 scount = 0;
1110 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1111 if (scount == nsrcs)
1112 break;
1113 for (i = 0; i < nsrcs; i++) {
1114 /* skip inactive filters */
1115 if (psf->sf_count[MCAST_INCLUDE] ||
1116 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1117 psf->sf_count[MCAST_EXCLUDE])
1118 break;
1119 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1120 scount++;
1121 break;
1122 }
1123 }
1124 }
1125 pmc->mca_flags &= ~MAF_GSQUERY;
1126 if (scount == nsrcs) /* all sources excluded */
1127 return false;
1128 return true;
1129}
1130
1131static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1132 const struct in6_addr *srcs)
1133{
1134 struct ip6_sf_list *psf;
1135 int i, scount;
1136
1137 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1138 return mld_xmarksources(pmc, nsrcs, srcs);
1139
1140 /* mark INCLUDE-mode sources */
1141
1142 scount = 0;
1143 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1144 if (scount == nsrcs)
1145 break;
1146 for (i = 0; i < nsrcs; i++) {
1147 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1148 psf->sf_gsresp = 1;
1149 scount++;
1150 break;
1151 }
1152 }
1153 }
1154 if (!scount) {
1155 pmc->mca_flags &= ~MAF_GSQUERY;
1156 return false;
1157 }
1158 pmc->mca_flags |= MAF_GSQUERY;
1159 return true;
1160}
1161
1162static int mld_force_mld_version(const struct inet6_dev *idev)
1163{
1164 /* Normally, both are 0 here. If enforcement of a particular version
1165 * is in use, the per-device setting has lower precedence than the
1166 * 'all' device setting (.../conf/all/force_mld_version).
1167 */
1168
1169 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1170 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1171 else
1172 return idev->cnf.force_mld_version;
1173}
1174
1175static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1176{
1177 return mld_force_mld_version(idev) == 2;
1178}
1179
1180static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1181{
1182 return mld_force_mld_version(idev) == 1;
1183}
1184
1185static bool mld_in_v1_mode(const struct inet6_dev *idev)
1186{
1187 if (mld_in_v2_mode_only(idev))
1188 return false;
1189 if (mld_in_v1_mode_only(idev))
1190 return true;
1191 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1192 return true;
1193
1194 return false;
1195}
1196
1197static void mld_set_v1_mode(struct inet6_dev *idev)
1198{
1199 /* RFC3810, relevant sections:
1200 * - 9.1. Robustness Variable
1201 * - 9.2. Query Interval
1202 * - 9.3. Query Response Interval
1203 * - 9.12. Older Version Querier Present Timeout
1204 */
1205 unsigned long switchback;
1206
1207 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1208
1209 idev->mc_v1_seen = jiffies + switchback;
1210}
1211
1212static void mld_update_qrv(struct inet6_dev *idev,
1213 const struct mld2_query *mlh2)
1214{
1215 /* RFC3810, relevant sections:
1216 * - 5.1.8. QRV (Querier's Robustness Variable)
1217 * - 9.1. Robustness Variable
1218 */
1219
1220 /* The value of the Robustness Variable MUST NOT be zero,
1221 * and SHOULD NOT be one. Catch this here if we ever run
1222 * into such a case in future.
1223 */
1224 const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1225 WARN_ON(idev->mc_qrv == 0);
1226
1227 if (mlh2->mld2q_qrv > 0)
1228 idev->mc_qrv = mlh2->mld2q_qrv;
1229
1230 if (unlikely(idev->mc_qrv < min_qrv)) {
1231 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1232 idev->mc_qrv, min_qrv);
1233 idev->mc_qrv = min_qrv;
1234 }
1235}
1236
1237static void mld_update_qi(struct inet6_dev *idev,
1238 const struct mld2_query *mlh2)
1239{
1240 /* RFC3810, relevant sections:
1241 * - 5.1.9. QQIC (Querier's Query Interval Code)
1242 * - 9.2. Query Interval
1243 * - 9.12. Older Version Querier Present Timeout
1244 * (the [Query Interval] in the last Query received)
1245 */
1246 unsigned long mc_qqi;
1247
1248 if (mlh2->mld2q_qqic < 128) {
1249 mc_qqi = mlh2->mld2q_qqic;
1250 } else {
1251 unsigned long mc_man, mc_exp;
1252
1253 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1254 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1255
1256 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1257 }
1258
1259 idev->mc_qi = mc_qqi * HZ;
1260}
1261
1262static void mld_update_qri(struct inet6_dev *idev,
1263 const struct mld2_query *mlh2)
1264{
1265 /* RFC3810, relevant sections:
1266 * - 5.1.3. Maximum Response Code
1267 * - 9.3. Query Response Interval
1268 */
1269 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1270}
1271
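/* Handle an MLDv1 query (or an MLDv2 query while operating in v1
 * compatibility): compute the maximum response delay and, for genuine
 * 24-octet v1 queries, enter v1 compatibility mode.
 */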
1272static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1273 unsigned long *max_delay, bool v1_query)
1274{
1275 unsigned long mldv1_md;
1276
1277 /* Ignore v1 queries */
1278 if (mld_in_v2_mode_only(idev))
1279 return -EINVAL;
1280
1281 mldv1_md = ntohs(mld->mld_maxdelay);
1282
1283 /* When we are in MLDv1 fallback and an MLDv2 router starts up
1284 * unaware of the current MLDv1 operation, the MRC == MRD mapping
1285 * only works when the exponential algorithm is not being
1286 * used (as MLDv1 is unaware of such things).
1287 *
1288 * According to the RFC author, the MLDv2 implementations
1289 * he's aware of all use a MRC < 32768 on start up queries.
1290 *
1291 * Thus, should we *ever* encounter something else larger
1292 * than that, just assume the maximum possible within our
1293 * reach.
1294 */
1295 if (!v1_query)
1296 mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1297
1298 *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1299
1300 /* MLDv1 router present: we need to go into v1 mode *only*
1301 * when an MLDv1 query is received as per section 9.12. of
1302 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1303 * queries MUST be of exactly 24 octets.
1304 */
1305 if (v1_query)
1306 mld_set_v1_mode(idev);
1307
1308 /* cancel MLDv2 report timer */
1309 mld_gq_stop_timer(idev);
1310 /* cancel the interface change timer */
1311 mld_ifc_stop_timer(idev);
1312 /* clear deleted report items */
1313 mld_clear_delrec(idev);
1314
1315 return 0;
1316}
1317
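/* Handle an MLDv2 query: compute the maximum response delay and refresh
 * QRV, QI and QRI from the query's fields.
 */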
1318static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1319 unsigned long *max_delay)
1320{
1321 *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1322
1323 mld_update_qrv(idev, mld);
1324 mld_update_qi(idev, mld);
1325 mld_update_qri(idev, mld);
1326
1327 idev->mc_maxdelay = *max_delay;
1328
1329 return 0;
1330}
1331
1332/* called with rcu_read_lock() */
1333int igmp6_event_query(struct sk_buff *skb)
1334{
1335 struct mld2_query *mlh2 = NULL;
1336 struct ifmcaddr6 *ma;
1337 const struct in6_addr *group;
1338 unsigned long max_delay;
1339 struct inet6_dev *idev;
1340 struct mld_msg *mld;
1341 int group_type;
1342 int mark = 0;
1343 int len, err;
1344
1345 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1346 return -EINVAL;
1347
1348 /* compute payload length excluding extension headers */
1349 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1350 len -= skb_network_header_len(skb);
1351
1352 /* RFC3810 6.2
1353 * Upon reception of an MLD message that contains a Query, the node
1354 * checks if the source address of the message is a valid link-local
1355 * address, if the Hop Limit is set to 1, and if the Router Alert
1356 * option is present in the Hop-By-Hop Options header of the IPv6
1357 * packet. If any of these checks fails, the packet is dropped.
1358 */
1359 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1360 ipv6_hdr(skb)->hop_limit != 1 ||
1361 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1362 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1363 return -EINVAL;
1364
1365 idev = __in6_dev_get(skb->dev);
1366 if (!idev)
1367 return 0;
1368
1369 mld = (struct mld_msg *)icmp6_hdr(skb);
1370 group = &mld->mld_mca;
1371 group_type = ipv6_addr_type(group);
1372
1373 if (group_type != IPV6_ADDR_ANY &&
1374 !(group_type&IPV6_ADDR_MULTICAST))
1375 return -EINVAL;
1376
1377 if (len < MLD_V1_QUERY_LEN) {
1378 return -EINVAL;
1379 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1380 err = mld_process_v1(idev, mld, &max_delay,
1381 len == MLD_V1_QUERY_LEN);
1382 if (err < 0)
1383 return err;
1384 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1385 int srcs_offset = sizeof(struct mld2_query) -
1386 sizeof(struct icmp6hdr);
1387
1388 if (!pskb_may_pull(skb, srcs_offset))
1389 return -EINVAL;
1390
1391 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1392
1393 err = mld_process_v2(idev, mlh2, &max_delay);
1394 if (err < 0)
1395 return err;
1396
1397 if (group_type == IPV6_ADDR_ANY) { /* general query */
1398 if (mlh2->mld2q_nsrcs)
1399 return -EINVAL; /* no sources allowed */
1400
1401 mld_gq_start_timer(idev);
1402 return 0;
1403 }
1404 /* mark sources to include, if group & source-specific */
1405 if (mlh2->mld2q_nsrcs != 0) {
1406 if (!pskb_may_pull(skb, srcs_offset +
1407 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1408 return -EINVAL;
1409
1410 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1411 mark = 1;
1412 }
1413 } else {
1414 return -EINVAL;
1415 }
1416
1417 read_lock_bh(&idev->lock);
1418 if (group_type == IPV6_ADDR_ANY) {
1419 for (ma = idev->mc_list; ma; ma = ma->next) {
1420 spin_lock_bh(&ma->mca_lock);
1421 igmp6_group_queried(ma, max_delay);
1422 spin_unlock_bh(&ma->mca_lock);
1423 }
1424 } else {
1425 for (ma = idev->mc_list; ma; ma = ma->next) {
1426 if (!ipv6_addr_equal(group, &ma->mca_addr))
1427 continue;
1428 spin_lock_bh(&ma->mca_lock);
1429 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1430 /* gsquery <- gsquery && mark */
1431 if (!mark)
1432 ma->mca_flags &= ~MAF_GSQUERY;
1433 } else {
1434 /* gsquery <- mark */
1435 if (mark)
1436 ma->mca_flags |= MAF_GSQUERY;
1437 else
1438 ma->mca_flags &= ~MAF_GSQUERY;
1439 }
1440 if (!(ma->mca_flags & MAF_GSQUERY) ||
1441 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1442 igmp6_group_queried(ma, max_delay);
1443 spin_unlock_bh(&ma->mca_lock);
1444 break;
1445 }
1446 }
1447 read_unlock_bh(&idev->lock);
1448
1449 return 0;
1450}
1451
1452/* called with rcu_read_lock() */
1453int igmp6_event_report(struct sk_buff *skb)
1454{
1455 struct ifmcaddr6 *ma;
1456 struct inet6_dev *idev;
1457 struct mld_msg *mld;
1458 int addr_type;
1459
1460 /* Our own report looped back. Ignore it. */
1461 if (skb->pkt_type == PACKET_LOOPBACK)
1462 return 0;
1463
1464 /* send our report if the MC router may not have heard this report */
1465 if (skb->pkt_type != PACKET_MULTICAST &&
1466 skb->pkt_type != PACKET_BROADCAST)
1467 return 0;
1468
1469 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1470 return -EINVAL;
1471
1472 mld = (struct mld_msg *)icmp6_hdr(skb);
1473
1474 /* Drop reports whose source is neither unspecified nor link-local */
1475 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1476 if (addr_type != IPV6_ADDR_ANY &&
1477 !(addr_type&IPV6_ADDR_LINKLOCAL))
1478 return -EINVAL;
1479
1480 idev = __in6_dev_get(skb->dev);
1481 if (!idev)
1482 return -ENODEV;
1483
1484 /*
1485 * Cancel the timer for this group
1486 */
1487
1488 read_lock_bh(&idev->lock);
1489 for (ma = idev->mc_list; ma; ma = ma->next) {
1490 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1491 spin_lock(&ma->mca_lock);
1492 if (del_timer(&ma->mca_timer))
1493 refcount_dec(&ma->mca_refcnt);
1494 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1495 spin_unlock(&ma->mca_lock);
1496 break;
1497 }
1498 }
1499 read_unlock_bh(&idev->lock);
1500 return 0;
1501}
1502
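/* Decide whether source psf must be listed in a record of the given type,
 * taking into account whether the group (gdeleted) or the source (sdeleted)
 * lives on a tomb list.
 */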
1503static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1504 int gdeleted, int sdeleted)
1505{
1506 switch (type) {
1507 case MLD2_MODE_IS_INCLUDE:
1508 case MLD2_MODE_IS_EXCLUDE:
1509 if (gdeleted || sdeleted)
1510 return false;
1511 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1512 if (pmc->mca_sfmode == MCAST_INCLUDE)
1513 return true;
1514 /* don't include if this source is excluded
1515 * in all filters
1516 */
1517 if (psf->sf_count[MCAST_INCLUDE])
1518 return type == MLD2_MODE_IS_INCLUDE;
1519 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1520 psf->sf_count[MCAST_EXCLUDE];
1521 }
1522 return false;
1523 case MLD2_CHANGE_TO_INCLUDE:
1524 if (gdeleted || sdeleted)
1525 return false;
1526 return psf->sf_count[MCAST_INCLUDE] != 0;
1527 case MLD2_CHANGE_TO_EXCLUDE:
1528 if (gdeleted || sdeleted)
1529 return false;
1530 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1531 psf->sf_count[MCAST_INCLUDE])
1532 return false;
1533 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1534 psf->sf_count[MCAST_EXCLUDE];
1535 case MLD2_ALLOW_NEW_SOURCES:
1536 if (gdeleted || !psf->sf_crcount)
1537 return false;
1538 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1539 case MLD2_BLOCK_OLD_SOURCES:
1540 if (pmc->mca_sfmode == MCAST_INCLUDE)
1541 return gdeleted || (psf->sf_crcount && sdeleted);
1542 return psf->sf_crcount && !gdeleted && !sdeleted;
1543 }
1544 return false;
1545}
1546
1547static int
1548mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1549{
1550 struct ip6_sf_list *psf;
1551 int scount = 0;
1552
1553 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1554 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1555 continue;
1556 scount++;
1557 }
1558 return scount;
1559}
1560
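/* Fill in the IPv6 header of a locally generated MLD packet. */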
1561static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1562 struct net_device *dev,
1563 const struct in6_addr *saddr,
1564 const struct in6_addr *daddr,
1565 int proto, int len)
1566{
1567 struct ipv6hdr *hdr;
1568
1569 skb->protocol = htons(ETH_P_IPV6);
1570 skb->dev = dev;
1571
1572 skb_reset_network_header(skb);
1573 skb_put(skb, sizeof(struct ipv6hdr));
1574 hdr = ipv6_hdr(skb);
1575
1576 ip6_flow_hdr(hdr, 0, 0);
1577
1578 hdr->payload_len = htons(len);
1579 hdr->nexthdr = proto;
1580 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1581
1582 hdr->saddr = *saddr;
1583 hdr->daddr = *daddr;
1584}
1585
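/* Allocate an skb for an MLDv2 report: IPv6 header, Hop-by-Hop header with
 * a Router Alert option, and an empty mld2_report ready for group records.
 */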
1586static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1587{
1588 struct net_device *dev = idev->dev;
1589 struct net *net = dev_net(dev);
1590 struct sock *sk = net->ipv6.igmp_sk;
1591 struct sk_buff *skb;
1592 struct mld2_report *pmr;
1593 struct in6_addr addr_buf;
1594 const struct in6_addr *saddr;
1595 int hlen = LL_RESERVED_SPACE(dev);
1596 int tlen = dev->needed_tailroom;
1597 unsigned int size = mtu + hlen + tlen;
1598 int err;
1599 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1600 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1601 IPV6_TLV_PADN, 0 };
1602
1603 /* we assume size > sizeof(ra) here */
1604 /* limit our allocations to order-0 page */
1605 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1606 skb = sock_alloc_send_skb(sk, size, 1, &err);
1607
1608 if (!skb)
1609 return NULL;
1610
1611 skb->priority = TC_PRIO_CONTROL;
1612 skb_reserve(skb, hlen);
1613 skb_tailroom_reserve(skb, mtu, tlen);
1614
1615 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1616 /* <draft-ietf-magma-mld-source-05.txt>:
1617 * use unspecified address as the source address
1618 * when a valid link-local address is not available.
1619 */
1620 saddr = &in6addr_any;
1621 } else
1622 saddr = &addr_buf;
1623
1624 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1625
1626 skb_put_data(skb, ra, sizeof(ra));
1627
1628 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1629 skb_put(skb, sizeof(*pmr));
1630 pmr = (struct mld2_report *)skb_transport_header(skb);
1631 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1632 pmr->mld2r_resv1 = 0;
1633 pmr->mld2r_cksum = 0;
1634 pmr->mld2r_resv2 = 0;
1635 pmr->mld2r_ngrec = 0;
1636 return skb;
1637}
1638
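/* Finalise an MLDv2 report (payload length and checksum) and send it
 * through the netfilter LOCAL_OUT hook.
 */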
1639static void mld_sendpack(struct sk_buff *skb)
1640{
1641 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1642 struct mld2_report *pmr =
1643 (struct mld2_report *)skb_transport_header(skb);
1644 int payload_len, mldlen;
1645 struct inet6_dev *idev;
1646 struct net *net = dev_net(skb->dev);
1647 int err;
1648 struct flowi6 fl6;
1649 struct dst_entry *dst;
1650
1651 rcu_read_lock();
1652 idev = __in6_dev_get(skb->dev);
1653 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1654
1655 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1656 sizeof(*pip6);
1657 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1658 pip6->payload_len = htons(payload_len);
1659
1660 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1661 IPPROTO_ICMPV6,
1662 csum_partial(skb_transport_header(skb),
1663 mldlen, 0));
1664
1665 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1666 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1667 skb->dev->ifindex);
1668 dst = icmp6_dst_alloc(skb->dev, &fl6);
1669
1670 err = 0;
1671 if (IS_ERR(dst)) {
1672 err = PTR_ERR(dst);
1673 dst = NULL;
1674 }
1675 skb_dst_set(skb, dst);
1676 if (err)
1677 goto err_out;
1678
1679 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1680 net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1681 dst_output);
1682out:
1683 if (!err) {
1684 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1685 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1686 } else {
1687 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1688 }
1689
1690 rcu_read_unlock();
1691 return;
1692
1693err_out:
1694 kfree_skb(skb);
1695 goto out;
1696}
1697
1698static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1699{
1700 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1701}
1702
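/* Start a new group record of the given type in skb, allocating a fresh
 * packet first if needed, and bump the report's record count.
 */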
1703static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1704 int type, struct mld2_grec **ppgr, unsigned int mtu)
1705{
1706 struct mld2_report *pmr;
1707 struct mld2_grec *pgr;
1708
1709 if (!skb) {
1710 skb = mld_newpack(pmc->idev, mtu);
1711 if (!skb)
1712 return NULL;
1713 }
1714 pgr = skb_put(skb, sizeof(struct mld2_grec));
1715 pgr->grec_type = type;
1716 pgr->grec_auxwords = 0;
1717 pgr->grec_nsrcs = 0;
1718 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1719 pmr = (struct mld2_report *)skb_transport_header(skb);
1720 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1721 *ppgr = pgr;
1722 return skb;
1723}
1724
1725#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1726
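/* Append group record(s) of the given type for pmc to skb, sending and
 * reallocating packets as they fill up; returns the skb still under
 * construction (or NULL on allocation failure).
 */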
1727static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1728 int type, int gdeleted, int sdeleted, int crsend)
1729{
1730 struct inet6_dev *idev = pmc->idev;
1731 struct net_device *dev = idev->dev;
1732 struct mld2_report *pmr;
1733 struct mld2_grec *pgr = NULL;
1734 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1735 int scount, stotal, first, isquery, truncate;
1736 unsigned int mtu;
1737
1738 if (pmc->mca_flags & MAF_NOREPORT)
1739 return skb;
1740
1741 mtu = READ_ONCE(dev->mtu);
1742 if (mtu < IPV6_MIN_MTU)
1743 return skb;
1744
1745 isquery = type == MLD2_MODE_IS_INCLUDE ||
1746 type == MLD2_MODE_IS_EXCLUDE;
1747 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1748 type == MLD2_CHANGE_TO_EXCLUDE;
1749
1750 stotal = scount = 0;
1751
1752 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1753
1754 if (!*psf_list)
1755 goto empty_source;
1756
1757 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1758
1759 /* EX and TO_EX get a fresh packet, if needed */
1760 if (truncate) {
1761 if (pmr && pmr->mld2r_ngrec &&
1762 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1763 if (skb)
1764 mld_sendpack(skb);
1765 skb = mld_newpack(idev, mtu);
1766 }
1767 }
1768 first = 1;
1769 psf_prev = NULL;
1770 for (psf = *psf_list; psf; psf = psf_next) {
1771 struct in6_addr *psrc;
1772
1773 psf_next = psf->sf_next;
1774
1775 if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1776 psf_prev = psf;
1777 continue;
1778 }
1779
1780 /* Based on RFC3810 6.1. Should not send source-list change
1781 * records when there is a filter mode change.
1782 */
1783 if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1784 (!gdeleted && pmc->mca_crcount)) &&
1785 (type == MLD2_ALLOW_NEW_SOURCES ||
1786 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1787 goto decrease_sf_crcount;
1788
1789 /* clear marks on query responses */
1790 if (isquery)
1791 psf->sf_gsresp = 0;
1792
1793 if (AVAILABLE(skb) < sizeof(*psrc) +
1794 first*sizeof(struct mld2_grec)) {
1795 if (truncate && !first)
1796 break; /* truncate these */
1797 if (pgr)
1798 pgr->grec_nsrcs = htons(scount);
1799 if (skb)
1800 mld_sendpack(skb);
1801 skb = mld_newpack(idev, mtu);
1802 first = 1;
1803 scount = 0;
1804 }
1805 if (first) {
1806 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1807 first = 0;
1808 }
1809 if (!skb)
1810 return NULL;
1811 psrc = skb_put(skb, sizeof(*psrc));
1812 *psrc = psf->sf_addr;
1813 scount++; stotal++;
1814 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1815 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1816decrease_sf_crcount:
1817 psf->sf_crcount--;
1818 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1819 if (psf_prev)
1820 psf_prev->sf_next = psf->sf_next;
1821 else
1822 *psf_list = psf->sf_next;
1823 kfree(psf);
1824 continue;
1825 }
1826 }
1827 psf_prev = psf;
1828 }
1829
1830empty_source:
1831 if (!stotal) {
1832 if (type == MLD2_ALLOW_NEW_SOURCES ||
1833 type == MLD2_BLOCK_OLD_SOURCES)
1834 return skb;
1835 if (pmc->mca_crcount || isquery || crsend) {
1836 /* make sure we have room for group header */
1837 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1838 mld_sendpack(skb);
1839 skb = NULL; /* add_grhead will get a new one */
1840 }
1841 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1842 }
1843 }
1844 if (pgr)
1845 pgr->grec_nsrcs = htons(scount);
1846
1847 if (isquery)
1848 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1849 return skb;
1850}
1851
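/* Answer a query with current-state records: for one group (pmc) or, when
 * pmc is NULL, for every reportable group on the interface.
 */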
1852static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1853{
1854 struct sk_buff *skb = NULL;
1855 int type;
1856
1857 read_lock_bh(&idev->lock);
1858 if (!pmc) {
1859 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1860 if (pmc->mca_flags & MAF_NOREPORT)
1861 continue;
1862 spin_lock_bh(&pmc->mca_lock);
1863 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1864 type = MLD2_MODE_IS_EXCLUDE;
1865 else
1866 type = MLD2_MODE_IS_INCLUDE;
1867 skb = add_grec(skb, pmc, type, 0, 0, 0);
1868 spin_unlock_bh(&pmc->mca_lock);
1869 }
1870 } else {
1871 spin_lock_bh(&pmc->mca_lock);
1872 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1873 type = MLD2_MODE_IS_EXCLUDE;
1874 else
1875 type = MLD2_MODE_IS_INCLUDE;
1876 skb = add_grec(skb, pmc, type, 0, 0, 0);
1877 spin_unlock_bh(&pmc->mca_lock);
1878 }
1879 read_unlock_bh(&idev->lock);
1880 if (skb)
1881 mld_sendpack(skb);
1882}
1883
1884/*
1885 * remove zero-count source records from a source filter list
1886 */
1887static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1888{
1889 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1890
1891 psf_prev = NULL;
1892 for (psf = *ppsf; psf; psf = psf_next) {
1893 psf_next = psf->sf_next;
1894 if (psf->sf_crcount == 0) {
1895 if (psf_prev)
1896 psf_prev->sf_next = psf->sf_next;
1897 else
1898 *ppsf = psf->sf_next;
1899 kfree(psf);
1900 } else
1901 psf_prev = psf;
1902 }
1903}
1904
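/* Send MLDv2 change records: source-list changes for tombstoned groups and
 * live memberships, plus any pending filter-mode changes.
 */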
1905static void mld_send_cr(struct inet6_dev *idev)
1906{
1907 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1908 struct sk_buff *skb = NULL;
1909 int type, dtype;
1910
1911 read_lock_bh(&idev->lock);
1912 spin_lock(&idev->mc_lock);
1913
1914 /* deleted MCA's */
1915 pmc_prev = NULL;
1916 for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
1917 pmc_next = pmc->next;
1918 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1919 type = MLD2_BLOCK_OLD_SOURCES;
1920 dtype = MLD2_BLOCK_OLD_SOURCES;
1921 skb = add_grec(skb, pmc, type, 1, 0, 0);
1922 skb = add_grec(skb, pmc, dtype, 1, 1, 0);
1923 }
1924 if (pmc->mca_crcount) {
1925 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1926 type = MLD2_CHANGE_TO_INCLUDE;
1927 skb = add_grec(skb, pmc, type, 1, 0, 0);
1928 }
1929 pmc->mca_crcount--;
1930 if (pmc->mca_crcount == 0) {
1931 mld_clear_zeros(&pmc->mca_tomb);
1932 mld_clear_zeros(&pmc->mca_sources);
1933 }
1934 }
1935 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1936 !pmc->mca_sources) {
1937 if (pmc_prev)
1938 pmc_prev->next = pmc_next;
1939 else
1940 idev->mc_tomb = pmc_next;
1941 in6_dev_put(pmc->idev);
1942 kfree(pmc);
1943 } else
1944 pmc_prev = pmc;
1945 }
1946 spin_unlock(&idev->mc_lock);
1947
1948 /* change recs */
1949 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1950 spin_lock_bh(&pmc->mca_lock);
1951 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1952 type = MLD2_BLOCK_OLD_SOURCES;
1953 dtype = MLD2_ALLOW_NEW_SOURCES;
1954 } else {
1955 type = MLD2_ALLOW_NEW_SOURCES;
1956 dtype = MLD2_BLOCK_OLD_SOURCES;
1957 }
1958 skb = add_grec(skb, pmc, type, 0, 0, 0);
1959 skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
1960
1961 /* filter mode changes */
1962 if (pmc->mca_crcount) {
1963 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1964 type = MLD2_CHANGE_TO_EXCLUDE;
1965 else
1966 type = MLD2_CHANGE_TO_INCLUDE;
1967 skb = add_grec(skb, pmc, type, 0, 0, 0);
1968 pmc->mca_crcount--;
1969 }
1970 spin_unlock_bh(&pmc->mca_lock);
1971 }
1972 read_unlock_bh(&idev->lock);
1973 if (!skb)
1974 return;
1975 (void) mld_sendpack(skb);
1976}
1977
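/* Build and send an MLDv1 message (report or done) for addr on dev; done
 * messages are sent to the link-local all-routers address.
 */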
1978static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1979{
1980 struct net *net = dev_net(dev);
1981 struct sock *sk = net->ipv6.igmp_sk;
1982 struct inet6_dev *idev;
1983 struct sk_buff *skb;
1984 struct mld_msg *hdr;
1985 const struct in6_addr *snd_addr, *saddr;
1986 struct in6_addr addr_buf;
1987 int hlen = LL_RESERVED_SPACE(dev);
1988 int tlen = dev->needed_tailroom;
1989 int err, len, payload_len, full_len;
1990 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1991 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1992 IPV6_TLV_PADN, 0 };
1993 struct flowi6 fl6;
1994 struct dst_entry *dst;
1995
1996 if (type == ICMPV6_MGM_REDUCTION)
1997 snd_addr = &in6addr_linklocal_allrouters;
1998 else
1999 snd_addr = addr;
2000
2001 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2002 payload_len = len + sizeof(ra);
2003 full_len = sizeof(struct ipv6hdr) + payload_len;
2004
2005 rcu_read_lock();
2006 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
2007 IPSTATS_MIB_OUT, full_len);
2008 rcu_read_unlock();
2009
2010 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
2011
2012 if (!skb) {
2013 rcu_read_lock();
2014 IP6_INC_STATS(net, __in6_dev_get(dev),
2015 IPSTATS_MIB_OUTDISCARDS);
2016 rcu_read_unlock();
2017 return;
2018 }
2019 skb->priority = TC_PRIO_CONTROL;
2020 skb_reserve(skb, hlen);
2021
2022 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2023 /* <draft-ietf-magma-mld-source-05.txt>:
2024 * use unspecified address as the source address
2025 * when a valid link-local address is not available.
2026 */
2027 saddr = &in6addr_any;
2028 } else
2029 saddr = &addr_buf;
2030
2031 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2032
2033 skb_put_data(skb, ra, sizeof(ra));
2034
2035 hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2036 hdr->mld_type = type;
2037 hdr->mld_mca = *addr;
2038
2039 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2040 IPPROTO_ICMPV6,
2041 csum_partial(hdr, len, 0));
2042
2043 rcu_read_lock();
2044 idev = __in6_dev_get(skb->dev);
2045
2046 icmpv6_flow_init(sk, &fl6, type,
2047 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2048 skb->dev->ifindex);
2049 dst = icmp6_dst_alloc(skb->dev, &fl6);
2050 if (IS_ERR(dst)) {
2051 err = PTR_ERR(dst);
2052 goto err_out;
2053 }
2054
2055 skb_dst_set(skb, dst);
2056 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2057 net, sk, skb, NULL, skb->dev,
2058 dst_output);
2059out:
2060 if (!err) {
2061 ICMP6MSGOUT_INC_STATS(net, idev, type);
2062 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2063 } else
2064 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2065
2066 rcu_read_unlock();
2067 return;
2068
2069err_out:
2070 kfree_skb(skb);
2071 goto out;
2072}
2073
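/* Send unsolicited change records (TO_EX or ALLOW) for every membership;
 * used once DAD completes on the link-local address and for its retransmits.
 */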
2074static void mld_send_initial_cr(struct inet6_dev *idev)
2075{
2076 struct sk_buff *skb;
2077 struct ifmcaddr6 *pmc;
2078 int type;
2079
2080 if (mld_in_v1_mode(idev))
2081 return;
2082
2083 skb = NULL;
2084 read_lock_bh(&idev->lock);
2085 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2086 spin_lock_bh(&pmc->mca_lock);
2087 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2088 type = MLD2_CHANGE_TO_EXCLUDE;
2089 else
2090 type = MLD2_ALLOW_NEW_SOURCES;
2091 skb = add_grec(skb, pmc, type, 0, 0, 1);
2092 spin_unlock_bh(&pmc->mca_lock);
2093 }
2094 read_unlock_bh(&idev->lock);
2095 if (skb)
2096 mld_sendpack(skb);
2097}
2098
2099void ipv6_mc_dad_complete(struct inet6_dev *idev)
2100{
2101 idev->mc_dad_count = idev->mc_qrv;
2102 if (idev->mc_dad_count) {
2103 mld_send_initial_cr(idev);
2104 idev->mc_dad_count--;
2105 if (idev->mc_dad_count)
2106 mld_dad_start_timer(idev,
2107 unsolicited_report_interval(idev));
2108 }
2109}
2110
2111static void mld_dad_timer_expire(struct timer_list *t)
2112{
2113 struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
2114
2115 mld_send_initial_cr(idev);
2116 if (idev->mc_dad_count) {
2117 idev->mc_dad_count--;
2118 if (idev->mc_dad_count)
2119 mld_dad_start_timer(idev,
2120 unsolicited_report_interval(idev));
2121 }
2122 in6_dev_put(idev);
2123}
2124
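/* Drop one reference on source @psfsrc in @pmc's filter list for the given
 * filter mode. When the last INCLUDE/EXCLUDE reference goes away, the entry
 * is either freed or, if it was being reported and MLDv2 is in use, moved to
 * the tomb list so a "delete" record can still be sent; returns 1 in that
 * case, 0 otherwise, and -ESRCH if the source was never there.
 */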
2125static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2126 const struct in6_addr *psfsrc)
2127{
2128 struct ip6_sf_list *psf, *psf_prev;
2129 int rv = 0;
2130
2131 psf_prev = NULL;
2132 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2133 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2134 break;
2135 psf_prev = psf;
2136 }
2137 if (!psf || psf->sf_count[sfmode] == 0) {
2138 /* source filter not found, or count wrong => bug */
2139 return -ESRCH;
2140 }
2141 psf->sf_count[sfmode]--;
2142 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2143 struct inet6_dev *idev = pmc->idev;
2144
2145 /* no more filters for this source */
2146 if (psf_prev)
2147 psf_prev->sf_next = psf->sf_next;
2148 else
2149 pmc->mca_sources = psf->sf_next;
2150 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2151 !mld_in_v1_mode(idev)) {
2152 psf->sf_crcount = idev->mc_qrv;
2153 psf->sf_next = pmc->mca_tomb;
2154 pmc->mca_tomb = psf;
2155 rv = 1;
2156 } else
2157 kfree(psf);
2158 }
2159 return rv;
2160}
2161
2162static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2163 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2164 int delta)
2165{
2166 struct ifmcaddr6 *pmc;
2167 int changerec = 0;
2168 int i, err;
2169
2170 if (!idev)
2171 return -ENODEV;
2172 read_lock_bh(&idev->lock);
2173 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2174 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2175 break;
2176 }
2177 if (!pmc) {
2178 /* MCA not found?? bug */
2179 read_unlock_bh(&idev->lock);
2180 return -ESRCH;
2181 }
2182 spin_lock_bh(&pmc->mca_lock);
2183 sf_markstate(pmc);
2184 if (!delta) {
2185 if (!pmc->mca_sfcount[sfmode]) {
2186 spin_unlock_bh(&pmc->mca_lock);
2187 read_unlock_bh(&idev->lock);
2188 return -EINVAL;
2189 }
2190 pmc->mca_sfcount[sfmode]--;
2191 }
2192 err = 0;
2193 for (i = 0; i < sfcount; i++) {
2194 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2195
2196 changerec |= rv > 0;
2197 if (!err && rv < 0)
2198 err = rv;
2199 }
2200 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2201 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2202 pmc->mca_sfcount[MCAST_INCLUDE]) {
2203 struct ip6_sf_list *psf;
2204
2205 /* filter mode change */
2206 pmc->mca_sfmode = MCAST_INCLUDE;
2207 pmc->mca_crcount = idev->mc_qrv;
2208 idev->mc_ifc_count = pmc->mca_crcount;
2209 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2210 psf->sf_crcount = 0;
2211 mld_ifc_event(pmc->idev);
2212 } else if (sf_setstate(pmc) || changerec)
2213 mld_ifc_event(pmc->idev);
2214 spin_unlock_bh(&pmc->mca_lock);
2215 read_unlock_bh(&idev->lock);
2216 return err;
2217}
2218
2219/*
2220 * Add multicast single-source filter to the interface list
2221 */
2222static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2223 const struct in6_addr *psfsrc)
2224{
2225 struct ip6_sf_list *psf, *psf_prev;
2226
2227 psf_prev = NULL;
2228 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2229 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2230 break;
2231 psf_prev = psf;
2232 }
2233 if (!psf) {
2234 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2235 if (!psf)
2236 return -ENOBUFS;
2237
2238 psf->sf_addr = *psfsrc;
2239 if (psf_prev) {
2240 psf_prev->sf_next = psf;
2241 } else
2242 pmc->mca_sources = psf;
2243 }
2244 psf->sf_count[sfmode]++;
2245 return 0;
2246}
2247
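/* sf_markstate()/sf_setstate() track, per source, whether the source was
 * being included in reports before a filter update (sf_oldin) and whether it
 * is afterwards; sources whose state flipped get their retransmission count
 * (sf_crcount) armed, and newly inactive sources are mirrored onto the tomb
 * list so "delete" records are generated.
 */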
2248static void sf_markstate(struct ifmcaddr6 *pmc)
2249{
2250 struct ip6_sf_list *psf;
2251 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2252
2253 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2254 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2255 psf->sf_oldin = mca_xcount ==
2256 psf->sf_count[MCAST_EXCLUDE] &&
2257 !psf->sf_count[MCAST_INCLUDE];
2258 } else
2259 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2260}
2261
2262static int sf_setstate(struct ifmcaddr6 *pmc)
2263{
2264 struct ip6_sf_list *psf, *dpsf;
2265 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2266 int qrv = pmc->idev->mc_qrv;
2267 int new_in, rv;
2268
2269 rv = 0;
2270 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2271 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2272 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2273 !psf->sf_count[MCAST_INCLUDE];
2274 } else
2275 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2276 if (new_in) {
2277 if (!psf->sf_oldin) {
2278 struct ip6_sf_list *prev = NULL;
2279
2280 for (dpsf = pmc->mca_tomb; dpsf;
2281 dpsf = dpsf->sf_next) {
2282 if (ipv6_addr_equal(&dpsf->sf_addr,
2283 &psf->sf_addr))
2284 break;
2285 prev = dpsf;
2286 }
2287 if (dpsf) {
2288 if (prev)
2289 prev->sf_next = dpsf->sf_next;
2290 else
2291 pmc->mca_tomb = dpsf->sf_next;
2292 kfree(dpsf);
2293 }
2294 psf->sf_crcount = qrv;
2295 rv++;
2296 }
2297 } else if (psf->sf_oldin) {
2298 psf->sf_crcount = 0;
2299 /*
2300 * add or update "delete" records if an active filter
2301 * is now inactive
2302 */
2303 for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
2304 if (ipv6_addr_equal(&dpsf->sf_addr,
2305 &psf->sf_addr))
2306 break;
2307 if (!dpsf) {
2308 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2309 if (!dpsf)
2310 continue;
2311 *dpsf = *psf;
2312 /* pmc->mca_lock held by callers */
2313 dpsf->sf_next = pmc->mca_tomb;
2314 pmc->mca_tomb = dpsf;
2315 }
2316 dpsf->sf_crcount = qrv;
2317 rv++;
2318 }
2319 }
2320 return rv;
2321}
2322
2323/*
2324 * Add multicast source filter list to the interface list
2325 */
2326static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2327 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2328 int delta)
2329{
2330 struct ifmcaddr6 *pmc;
2331 int isexclude;
2332 int i, err;
2333
2334 if (!idev)
2335 return -ENODEV;
2336 read_lock_bh(&idev->lock);
2337 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2338 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2339 break;
2340 }
2341 if (!pmc) {
2342 /* MCA not found?? bug */
2343 read_unlock_bh(&idev->lock);
2344 return -ESRCH;
2345 }
2346 spin_lock_bh(&pmc->mca_lock);
2347
2348 sf_markstate(pmc);
2349 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2350 if (!delta)
2351 pmc->mca_sfcount[sfmode]++;
2352 err = 0;
2353 for (i = 0; i < sfcount; i++) {
2354 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2355 if (err)
2356 break;
2357 }
2358 if (err) {
2359 int j;
2360
2361 if (!delta)
2362 pmc->mca_sfcount[sfmode]--;
2363 for (j = 0; j < i; j++)
2364 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2365 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2366 struct ip6_sf_list *psf;
2367
2368 /* filter mode change */
2369 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2370 pmc->mca_sfmode = MCAST_EXCLUDE;
2371 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2372 pmc->mca_sfmode = MCAST_INCLUDE;
2373 /* else no filters; keep old mode for reports */
2374
2375 pmc->mca_crcount = idev->mc_qrv;
2376 idev->mc_ifc_count = pmc->mca_crcount;
2377 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2378 psf->sf_crcount = 0;
2379 mld_ifc_event(idev);
2380 } else if (sf_setstate(pmc))
2381 mld_ifc_event(idev);
2382 spin_unlock_bh(&pmc->mca_lock);
2383 read_unlock_bh(&idev->lock);
2384 return err;
2385}
2386
2387static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2388{
2389 struct ip6_sf_list *psf, *nextpsf;
2390
2391 for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
2392 nextpsf = psf->sf_next;
2393 kfree(psf);
2394 }
2395 pmc->mca_tomb = NULL;
2396 for (psf = pmc->mca_sources; psf; psf = nextpsf) {
2397 nextpsf = psf->sf_next;
2398 kfree(psf);
2399 }
2400 pmc->mca_sources = NULL;
2401 pmc->mca_sfmode = MCAST_EXCLUDE;
2402 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2403 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2404}
2405
2406
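/* MLDv1-style join: send an unsolicited Report right away and arm the
 * per-group timer with a random delay (bounded by the unsolicited report
 * interval) so the Report is repeated once more when the timer fires.
 */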
2407static void igmp6_join_group(struct ifmcaddr6 *ma)
2408{
2409 unsigned long delay;
2410
2411 if (ma->mca_flags & MAF_NOREPORT)
2412 return;
2413
2414 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2415
2416 delay = prandom_u32() % unsolicited_report_interval(ma->idev);
2417
2418 spin_lock_bh(&ma->mca_lock);
2419 if (del_timer(&ma->mca_timer)) {
2420 refcount_dec(&ma->mca_refcnt);
2421 delay = ma->mca_timer.expires - jiffies;
2422 }
2423
2424 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2425 refcount_inc(&ma->mca_refcnt);
2426 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2427 spin_unlock_bh(&ma->mca_lock);
2428}
2429
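/* Release all of the socket's source-filter state for @iml and propagate
 * the removal to the interface's per-source counters via ip6_mc_del_src().
 */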
2430static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2431 struct inet6_dev *idev)
2432{
2433 int err;
2434
2435 write_lock_bh(&iml->sflock);
2436 if (!iml->sflist) {
2437 /* any-source empty exclude case */
2438 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2439 } else {
2440 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2441 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2442 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2443 iml->sflist = NULL;
2444 }
2445 write_unlock_bh(&iml->sflock);
2446 return err;
2447}
2448
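/* Leaving a group: in MLDv1 mode send a Done message, but only if we were
 * the last node to report the group; in MLDv2 mode queue a "delete" record
 * and kick the interface-change timer instead.
 */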
2449static void igmp6_leave_group(struct ifmcaddr6 *ma)
2450{
2451 if (mld_in_v1_mode(ma->idev)) {
2452 if (ma->mca_flags & MAF_LAST_REPORTER)
2453 igmp6_send(&ma->mca_addr, ma->idev->dev,
2454 ICMPV6_MGM_REDUCTION);
2455 } else {
2456 mld_add_delrec(ma->idev, ma);
2457 mld_ifc_event(ma->idev);
2458 }
2459}
2460
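/* General-query timer: a previously received MLDv2 general query has
 * expired, so answer it with a full current-state report.
 */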
2461static void mld_gq_timer_expire(struct timer_list *t)
2462{
2463 struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
2464
2465 idev->mc_gq_running = 0;
2466 mld_send_report(idev, NULL);
2467 in6_dev_put(idev);
2468}
2469
2470static void mld_ifc_timer_expire(struct timer_list *t)
2471{
2472 struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
2473
2474 mld_send_cr(idev);
2475 if (idev->mc_ifc_count) {
2476 idev->mc_ifc_count--;
2477 if (idev->mc_ifc_count)
2478 mld_ifc_start_timer(idev,
2479 unsolicited_report_interval(idev));
2480 }
2481 in6_dev_put(idev);
2482}
2483
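/* Something changed on the interface: schedule the state-change report and
 * let it be retransmitted [Robustness Variable] times. Not used in MLDv1
 * mode, where plain Report/Done messages are sent instead.
 */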
2484static void mld_ifc_event(struct inet6_dev *idev)
2485{
2486 if (mld_in_v1_mode(idev))
2487 return;
2488 idev->mc_ifc_count = idev->mc_qrv;
2489 mld_ifc_start_timer(idev, 1);
2490}
2491
2492static void igmp6_timer_handler(struct timer_list *t)
2493{
2494 struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
2495
2496 if (mld_in_v1_mode(ma->idev))
2497 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2498 else
2499 mld_send_report(ma->idev, ma);
2500
2501 spin_lock(&ma->mca_lock);
2502 ma->mca_flags |= MAF_LAST_REPORTER;
2503 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2504 spin_unlock(&ma->mca_lock);
2505 ma_put(ma);
2506}
2507
2508/* Device changing type */
2509
2510void ipv6_mc_unmap(struct inet6_dev *idev)
2511{
2512 struct ifmcaddr6 *i;
2513
2514	/* Withdraw the multicast list: the device is changing type */
2515
2516 read_lock_bh(&idev->lock);
2517 for (i = idev->mc_list; i; i = i->next)
2518 igmp6_group_dropped(i);
2519 read_unlock_bh(&idev->lock);
2520}
2521
2522void ipv6_mc_remap(struct inet6_dev *idev)
2523{
2524 ipv6_mc_up(idev);
2525}
2526
2527/* Device going down */
2528
2529void ipv6_mc_down(struct inet6_dev *idev)
2530{
2531 struct ifmcaddr6 *i;
2532
2533 /* Withdraw multicast list */
2534
2535 read_lock_bh(&idev->lock);
2536
2537 for (i = idev->mc_list; i; i = i->next)
2538 igmp6_group_dropped(i);
2539
2540	/* Stop the timers only after the groups have been dropped, or we
2541	 * would start them again from mld_ifc_event().
2542	 */
2543 mld_ifc_stop_timer(idev);
2544 mld_gq_stop_timer(idev);
2545 mld_dad_stop_timer(idev);
2546 read_unlock_bh(&idev->lock);
2547}
2548
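/* Bring the per-interface MLD parameters back to their defaults (robustness
 * variable, query interval, query response interval) and drop out of MLDv1
 * compatibility mode; called when the device is initialized or brought up.
 */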
2549static void ipv6_mc_reset(struct inet6_dev *idev)
2550{
2551 idev->mc_qrv = sysctl_mld_qrv;
2552 idev->mc_qi = MLD_QI_DEFAULT;
2553 idev->mc_qri = MLD_QRI_DEFAULT;
2554 idev->mc_v1_seen = 0;
2555 idev->mc_maxdelay = unsolicited_report_interval(idev);
2556}
2557
2558/* Device going up */
2559
2560void ipv6_mc_up(struct inet6_dev *idev)
2561{
2562 struct ifmcaddr6 *i;
2563
2564 /* Install multicast list, except for all-nodes (already installed) */
2565
2566 read_lock_bh(&idev->lock);
2567 ipv6_mc_reset(idev);
2568 for (i = idev->mc_list; i; i = i->next) {
2569 mld_del_delrec(idev, i);
2570 igmp6_group_added(i);
2571 }
2572 read_unlock_bh(&idev->lock);
2573}
2574
2575/* IPv6 device initialization. */
2576
2577void ipv6_mc_init_dev(struct inet6_dev *idev)
2578{
2579 write_lock_bh(&idev->lock);
2580 spin_lock_init(&idev->mc_lock);
2581 idev->mc_gq_running = 0;
2582 timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
2583 idev->mc_tomb = NULL;
2584 idev->mc_ifc_count = 0;
2585 timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
2586 timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
2587 ipv6_mc_reset(idev);
2588 write_unlock_bh(&idev->lock);
2589}
2590
2591/*
2592 * Device is about to be destroyed: clean up.
2593 */
2594
2595void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2596{
2597 struct ifmcaddr6 *i;
2598
2599 /* Deactivate timers */
2600 ipv6_mc_down(idev);
2601 mld_clear_delrec(idev);
2602
2603 /* Delete all-nodes address. */
2604	/* We cannot call ipv6_dev_mc_dec() directly; our caller in
2605	 * addrconf.c has already NULL'd out dev->ip6_ptr, so in6_dev_get()
2606	 * would fail.
2607	 */
2608 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2609
2610 if (idev->cnf.forwarding)
2611 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2612
2613 write_lock_bh(&idev->lock);
2614 while ((i = idev->mc_list) != NULL) {
2615 idev->mc_list = i->next;
2616
2617 write_unlock_bh(&idev->lock);
2618 ip6_mc_clear_src(i);
2619 ma_put(i);
2620 write_lock_bh(&idev->lock);
2621 }
2622 write_unlock_bh(&idev->lock);
2623}
2624
2625static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2626{
2627 struct ifmcaddr6 *pmc;
2628
2629 ASSERT_RTNL();
2630
2631 if (mld_in_v1_mode(idev)) {
2632 read_lock_bh(&idev->lock);
2633 for (pmc = idev->mc_list; pmc; pmc = pmc->next)
2634 igmp6_join_group(pmc);
2635 read_unlock_bh(&idev->lock);
2636 } else
2637 mld_send_report(idev, NULL);
2638}
2639
2640static int ipv6_mc_netdev_event(struct notifier_block *this,
2641 unsigned long event,
2642 void *ptr)
2643{
2644 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2645 struct inet6_dev *idev = __in6_dev_get(dev);
2646
2647 switch (event) {
2648 case NETDEV_RESEND_IGMP:
2649 if (idev)
2650 ipv6_mc_rejoin_groups(idev);
2651 break;
2652 default:
2653 break;
2654 }
2655
2656 return NOTIFY_DONE;
2657}
2658
2659static struct notifier_block igmp6_netdev_notifier = {
2660 .notifier_call = ipv6_mc_netdev_event,
2661};
2662
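/* /proc/net/igmp6 and /proc/net/mcfilter6: seq_file iterators that walk
 * every device's multicast group list (and, for mcfilter6, each group's
 * source-filter list) under rcu_read_lock() plus the inet6_dev read lock.
 */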
2663#ifdef CONFIG_PROC_FS
2664struct igmp6_mc_iter_state {
2665 struct seq_net_private p;
2666 struct net_device *dev;
2667 struct inet6_dev *idev;
2668};
2669
2670#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2671
2672static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2673{
2674 struct ifmcaddr6 *im = NULL;
2675 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2676 struct net *net = seq_file_net(seq);
2677
2678 state->idev = NULL;
2679 for_each_netdev_rcu(net, state->dev) {
2680 struct inet6_dev *idev;
2681 idev = __in6_dev_get(state->dev);
2682 if (!idev)
2683 continue;
2684 read_lock_bh(&idev->lock);
2685 im = idev->mc_list;
2686 if (im) {
2687 state->idev = idev;
2688 break;
2689 }
2690 read_unlock_bh(&idev->lock);
2691 }
2692 return im;
2693}
2694
2695static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2696{
2697 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2698
2699 im = im->next;
2700 while (!im) {
2701 if (likely(state->idev))
2702 read_unlock_bh(&state->idev->lock);
2703
2704 state->dev = next_net_device_rcu(state->dev);
2705 if (!state->dev) {
2706 state->idev = NULL;
2707 break;
2708 }
2709 state->idev = __in6_dev_get(state->dev);
2710 if (!state->idev)
2711 continue;
2712 read_lock_bh(&state->idev->lock);
2713 im = state->idev->mc_list;
2714 }
2715 return im;
2716}
2717
2718static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2719{
2720 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2721 if (im)
2722 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2723 --pos;
2724 return pos ? NULL : im;
2725}
2726
2727static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2728 __acquires(RCU)
2729{
2730 rcu_read_lock();
2731 return igmp6_mc_get_idx(seq, *pos);
2732}
2733
2734static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2735{
2736 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2737
2738 ++*pos;
2739 return im;
2740}
2741
2742static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2743 __releases(RCU)
2744{
2745 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2746
2747 if (likely(state->idev)) {
2748 read_unlock_bh(&state->idev->lock);
2749 state->idev = NULL;
2750 }
2751 state->dev = NULL;
2752 rcu_read_unlock();
2753}
2754
2755static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2756{
2757 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2758 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2759
2760 seq_printf(seq,
2761 "%-4d %-15s %pi6 %5d %08X %ld\n",
2762 state->dev->ifindex, state->dev->name,
2763 &im->mca_addr,
2764 im->mca_users, im->mca_flags,
2765 (im->mca_flags&MAF_TIMER_RUNNING) ?
2766 jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2767 return 0;
2768}
2769
2770static const struct seq_operations igmp6_mc_seq_ops = {
2771 .start = igmp6_mc_seq_start,
2772 .next = igmp6_mc_seq_next,
2773 .stop = igmp6_mc_seq_stop,
2774 .show = igmp6_mc_seq_show,
2775};
2776
2777struct igmp6_mcf_iter_state {
2778 struct seq_net_private p;
2779 struct net_device *dev;
2780 struct inet6_dev *idev;
2781 struct ifmcaddr6 *im;
2782};
2783
2784#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2785
2786static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2787{
2788 struct ip6_sf_list *psf = NULL;
2789 struct ifmcaddr6 *im = NULL;
2790 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2791 struct net *net = seq_file_net(seq);
2792
2793 state->idev = NULL;
2794 state->im = NULL;
2795 for_each_netdev_rcu(net, state->dev) {
2796 struct inet6_dev *idev;
2797 idev = __in6_dev_get(state->dev);
2798 if (unlikely(idev == NULL))
2799 continue;
2800 read_lock_bh(&idev->lock);
2801 im = idev->mc_list;
2802 if (likely(im)) {
2803 spin_lock_bh(&im->mca_lock);
2804 psf = im->mca_sources;
2805 if (likely(psf)) {
2806 state->im = im;
2807 state->idev = idev;
2808 break;
2809 }
2810 spin_unlock_bh(&im->mca_lock);
2811 }
2812 read_unlock_bh(&idev->lock);
2813 }
2814 return psf;
2815}
2816
2817static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2818{
2819 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2820
2821 psf = psf->sf_next;
2822 while (!psf) {
2823 spin_unlock_bh(&state->im->mca_lock);
2824 state->im = state->im->next;
2825 while (!state->im) {
2826 if (likely(state->idev))
2827 read_unlock_bh(&state->idev->lock);
2828
2829 state->dev = next_net_device_rcu(state->dev);
2830 if (!state->dev) {
2831 state->idev = NULL;
2832 goto out;
2833 }
2834 state->idev = __in6_dev_get(state->dev);
2835 if (!state->idev)
2836 continue;
2837 read_lock_bh(&state->idev->lock);
2838 state->im = state->idev->mc_list;
2839 }
2840 if (!state->im)
2841 break;
2842 spin_lock_bh(&state->im->mca_lock);
2843 psf = state->im->mca_sources;
2844 }
2845out:
2846 return psf;
2847}
2848
2849static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2850{
2851 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2852 if (psf)
2853 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2854 --pos;
2855 return pos ? NULL : psf;
2856}
2857
2858static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2859 __acquires(RCU)
2860{
2861 rcu_read_lock();
2862 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2863}
2864
2865static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2866{
2867 struct ip6_sf_list *psf;
2868 if (v == SEQ_START_TOKEN)
2869 psf = igmp6_mcf_get_first(seq);
2870 else
2871 psf = igmp6_mcf_get_next(seq, v);
2872 ++*pos;
2873 return psf;
2874}
2875
2876static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2877 __releases(RCU)
2878{
2879 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2880 if (likely(state->im)) {
2881 spin_unlock_bh(&state->im->mca_lock);
2882 state->im = NULL;
2883 }
2884 if (likely(state->idev)) {
2885 read_unlock_bh(&state->idev->lock);
2886 state->idev = NULL;
2887 }
2888 state->dev = NULL;
2889 rcu_read_unlock();
2890}
2891
2892static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2893{
2894 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2895 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2896
2897 if (v == SEQ_START_TOKEN) {
2898 seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
2899 } else {
2900 seq_printf(seq,
2901 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2902 state->dev->ifindex, state->dev->name,
2903 &state->im->mca_addr,
2904 &psf->sf_addr,
2905 psf->sf_count[MCAST_INCLUDE],
2906 psf->sf_count[MCAST_EXCLUDE]);
2907 }
2908 return 0;
2909}
2910
2911static const struct seq_operations igmp6_mcf_seq_ops = {
2912 .start = igmp6_mcf_seq_start,
2913 .next = igmp6_mcf_seq_next,
2914 .stop = igmp6_mcf_seq_stop,
2915 .show = igmp6_mcf_seq_show,
2916};
2917
2918static int __net_init igmp6_proc_init(struct net *net)
2919{
2920 int err;
2921
2922 err = -ENOMEM;
2923 if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
2924 sizeof(struct igmp6_mc_iter_state)))
2925 goto out;
2926 if (!proc_create_net("mcfilter6", 0444, net->proc_net,
2927 &igmp6_mcf_seq_ops,
2928 sizeof(struct igmp6_mcf_iter_state)))
2929 goto out_proc_net_igmp6;
2930
2931 err = 0;
2932out:
2933 return err;
2934
2935out_proc_net_igmp6:
2936 remove_proc_entry("igmp6", net->proc_net);
2937 goto out;
2938}
2939
2940static void __net_exit igmp6_proc_exit(struct net *net)
2941{
2942 remove_proc_entry("mcfilter6", net->proc_net);
2943 remove_proc_entry("igmp6", net->proc_net);
2944}
2945#else
2946static inline int igmp6_proc_init(struct net *net)
2947{
2948 return 0;
2949}
2950static inline void igmp6_proc_exit(struct net *net)
2951{
2952}
2953#endif
2954
2955static int __net_init igmp6_net_init(struct net *net)
2956{
2957 int err;
2958
2959 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2960 SOCK_RAW, IPPROTO_ICMPV6, net);
2961 if (err < 0) {
2962 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2963 err);
2964 goto out;
2965 }
2966
2967 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2968
2969 err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
2970 SOCK_RAW, IPPROTO_ICMPV6, net);
2971 if (err < 0) {
2972 pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
2973 err);
2974 goto out_sock_create;
2975 }
2976
2977 err = igmp6_proc_init(net);
2978 if (err)
2979 goto out_sock_create_autojoin;
2980
2981 return 0;
2982
2983out_sock_create_autojoin:
2984 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2985out_sock_create:
2986 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2987out:
2988 return err;
2989}
2990
2991static void __net_exit igmp6_net_exit(struct net *net)
2992{
2993 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2994 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2995 igmp6_proc_exit(net);
2996}
2997
2998static struct pernet_operations igmp6_net_ops = {
2999 .init = igmp6_net_init,
3000 .exit = igmp6_net_exit,
3001};
3002
3003int __init igmp6_init(void)
3004{
3005 return register_pernet_subsys(&igmp6_net_ops);
3006}
3007
3008int __init igmp6_late_init(void)
3009{
3010 return register_netdevice_notifier(&igmp6_netdev_notifier);
3011}
3012
3013void igmp6_cleanup(void)
3014{
3015 unregister_pernet_subsys(&igmp6_net_ops);
3016}
3017
3018void igmp6_late_cleanup(void)
3019{
3020 unregister_netdevice_notifier(&igmp6_netdev_notifier);
3021}
108
109#define IPV6_MLD_MAX_MSF 64
110
111int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
112int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
113
114/*
115 * socket join on multicast group
116 */
117
118#define for_each_pmc_rcu(np, pmc) \
119 for (pmc = rcu_dereference(np->ipv6_mc_list); \
120 pmc != NULL; \
121 pmc = rcu_dereference(pmc->next))
122
123static int unsolicited_report_interval(struct inet6_dev *idev)
124{
125 int iv;
126
127 if (mld_in_v1_mode(idev))
128 iv = idev->cnf.mldv1_unsolicited_report_interval;
129 else
130 iv = idev->cnf.mldv2_unsolicited_report_interval;
131
132 return iv > 0 ? iv : 1;
133}
134
135int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
136{
137 struct net_device *dev = NULL;
138 struct ipv6_mc_socklist *mc_lst;
139 struct ipv6_pinfo *np = inet6_sk(sk);
140 struct net *net = sock_net(sk);
141 int err;
142
143 ASSERT_RTNL();
144
145 if (!ipv6_addr_is_multicast(addr))
146 return -EINVAL;
147
148 rcu_read_lock();
149 for_each_pmc_rcu(np, mc_lst) {
150 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
151 ipv6_addr_equal(&mc_lst->addr, addr)) {
152 rcu_read_unlock();
153 return -EADDRINUSE;
154 }
155 }
156 rcu_read_unlock();
157
158 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
159
160 if (!mc_lst)
161 return -ENOMEM;
162
163 mc_lst->next = NULL;
164 mc_lst->addr = *addr;
165
166 if (ifindex == 0) {
167 struct rt6_info *rt;
168 rt = rt6_lookup(net, addr, NULL, 0, 0);
169 if (rt) {
170 dev = rt->dst.dev;
171 ip6_rt_put(rt);
172 }
173 } else
174 dev = __dev_get_by_index(net, ifindex);
175
176 if (!dev) {
177 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
178 return -ENODEV;
179 }
180
181 mc_lst->ifindex = dev->ifindex;
182 mc_lst->sfmode = MCAST_EXCLUDE;
183 rwlock_init(&mc_lst->sflock);
184 mc_lst->sflist = NULL;
185
186 /*
187 * now add/increase the group membership on the device
188 */
189
190 err = ipv6_dev_mc_inc(dev, addr);
191
192 if (err) {
193 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
194 return err;
195 }
196
197 mc_lst->next = np->ipv6_mc_list;
198 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
199
200 return 0;
201}
202EXPORT_SYMBOL(ipv6_sock_mc_join);
203
204/*
205 * socket leave on multicast group
206 */
207int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
208{
209 struct ipv6_pinfo *np = inet6_sk(sk);
210 struct ipv6_mc_socklist *mc_lst;
211 struct ipv6_mc_socklist __rcu **lnk;
212 struct net *net = sock_net(sk);
213
214 ASSERT_RTNL();
215
216 if (!ipv6_addr_is_multicast(addr))
217 return -EINVAL;
218
219 for (lnk = &np->ipv6_mc_list;
220 (mc_lst = rtnl_dereference(*lnk)) != NULL;
221 lnk = &mc_lst->next) {
222 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
223 ipv6_addr_equal(&mc_lst->addr, addr)) {
224 struct net_device *dev;
225
226 *lnk = mc_lst->next;
227
228 dev = __dev_get_by_index(net, mc_lst->ifindex);
229 if (dev) {
230 struct inet6_dev *idev = __in6_dev_get(dev);
231
232 (void) ip6_mc_leave_src(sk, mc_lst, idev);
233 if (idev)
234 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
235 } else
236 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
237
238 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
239 kfree_rcu(mc_lst, rcu);
240 return 0;
241 }
242 }
243
244 return -EADDRNOTAVAIL;
245}
246EXPORT_SYMBOL(ipv6_sock_mc_drop);
247
248/* called with rcu_read_lock() */
249static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
250 const struct in6_addr *group,
251 int ifindex)
252{
253 struct net_device *dev = NULL;
254 struct inet6_dev *idev = NULL;
255
256 if (ifindex == 0) {
257 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
258
259 if (rt) {
260 dev = rt->dst.dev;
261 ip6_rt_put(rt);
262 }
263 } else
264 dev = dev_get_by_index_rcu(net, ifindex);
265
266 if (!dev)
267 return NULL;
268 idev = __in6_dev_get(dev);
269 if (!idev)
270 return NULL;
271 read_lock_bh(&idev->lock);
272 if (idev->dead) {
273 read_unlock_bh(&idev->lock);
274 return NULL;
275 }
276 return idev;
277}
278
279void ipv6_sock_mc_close(struct sock *sk)
280{
281 struct ipv6_pinfo *np = inet6_sk(sk);
282 struct ipv6_mc_socklist *mc_lst;
283 struct net *net = sock_net(sk);
284
285 if (!rcu_access_pointer(np->ipv6_mc_list))
286 return;
287
288 rtnl_lock();
289 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
290 struct net_device *dev;
291
292 np->ipv6_mc_list = mc_lst->next;
293
294 dev = __dev_get_by_index(net, mc_lst->ifindex);
295 if (dev) {
296 struct inet6_dev *idev = __in6_dev_get(dev);
297
298 (void) ip6_mc_leave_src(sk, mc_lst, idev);
299 if (idev)
300 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
301 } else
302 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
303
304 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
305 kfree_rcu(mc_lst, rcu);
306
307 }
308 rtnl_unlock();
309}
310
311int ip6_mc_source(int add, int omode, struct sock *sk,
312 struct group_source_req *pgsr)
313{
314 struct in6_addr *source, *group;
315 struct ipv6_mc_socklist *pmc;
316 struct inet6_dev *idev;
317 struct ipv6_pinfo *inet6 = inet6_sk(sk);
318 struct ip6_sf_socklist *psl;
319 struct net *net = sock_net(sk);
320 int i, j, rv;
321 int leavegroup = 0;
322 int pmclocked = 0;
323 int err;
324
325 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
326 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
327
328 if (!ipv6_addr_is_multicast(group))
329 return -EINVAL;
330
331 rcu_read_lock();
332 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
333 if (!idev) {
334 rcu_read_unlock();
335 return -ENODEV;
336 }
337
338 err = -EADDRNOTAVAIL;
339
340 for_each_pmc_rcu(inet6, pmc) {
341 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
342 continue;
343 if (ipv6_addr_equal(&pmc->addr, group))
344 break;
345 }
346 if (!pmc) { /* must have a prior join */
347 err = -EINVAL;
348 goto done;
349 }
350 /* if a source filter was set, must be the same mode as before */
351 if (pmc->sflist) {
352 if (pmc->sfmode != omode) {
353 err = -EINVAL;
354 goto done;
355 }
356 } else if (pmc->sfmode != omode) {
357 /* allow mode switches for empty-set filters */
358 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
359 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
360 pmc->sfmode = omode;
361 }
362
363 write_lock(&pmc->sflock);
364 pmclocked = 1;
365
366 psl = pmc->sflist;
367 if (!add) {
368 if (!psl)
369 goto done; /* err = -EADDRNOTAVAIL */
370 rv = !0;
371 for (i = 0; i < psl->sl_count; i++) {
372 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
373 if (rv == 0)
374 break;
375 }
376 if (rv) /* source not found */
377 goto done; /* err = -EADDRNOTAVAIL */
378
379 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
380 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
381 leavegroup = 1;
382 goto done;
383 }
384
385 /* update the interface filter */
386 ip6_mc_del_src(idev, group, omode, 1, source, 1);
387
388 for (j = i+1; j < psl->sl_count; j++)
389 psl->sl_addr[j-1] = psl->sl_addr[j];
390 psl->sl_count--;
391 err = 0;
392 goto done;
393 }
394 /* else, add a new source to the filter */
395
396 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
397 err = -ENOBUFS;
398 goto done;
399 }
400 if (!psl || psl->sl_count == psl->sl_max) {
401 struct ip6_sf_socklist *newpsl;
402 int count = IP6_SFBLOCK;
403
404 if (psl)
405 count += psl->sl_max;
406 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
407 if (!newpsl) {
408 err = -ENOBUFS;
409 goto done;
410 }
411 newpsl->sl_max = count;
412 newpsl->sl_count = count - IP6_SFBLOCK;
413 if (psl) {
414 for (i = 0; i < psl->sl_count; i++)
415 newpsl->sl_addr[i] = psl->sl_addr[i];
416 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
417 }
418 pmc->sflist = psl = newpsl;
419 }
420 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
421 for (i = 0; i < psl->sl_count; i++) {
422 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
423 if (rv == 0) /* There is an error in the address. */
424 goto done;
425 }
426 for (j = psl->sl_count-1; j >= i; j--)
427 psl->sl_addr[j+1] = psl->sl_addr[j];
428 psl->sl_addr[i] = *source;
429 psl->sl_count++;
430 err = 0;
431 /* update the interface list */
432 ip6_mc_add_src(idev, group, omode, 1, source, 1);
433done:
434 if (pmclocked)
435 write_unlock(&pmc->sflock);
436 read_unlock_bh(&idev->lock);
437 rcu_read_unlock();
438 if (leavegroup)
439 err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
440 return err;
441}
442
443int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
444{
445 const struct in6_addr *group;
446 struct ipv6_mc_socklist *pmc;
447 struct inet6_dev *idev;
448 struct ipv6_pinfo *inet6 = inet6_sk(sk);
449 struct ip6_sf_socklist *newpsl, *psl;
450 struct net *net = sock_net(sk);
451 int leavegroup = 0;
452 int i, err;
453
454 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
455
456 if (!ipv6_addr_is_multicast(group))
457 return -EINVAL;
458 if (gsf->gf_fmode != MCAST_INCLUDE &&
459 gsf->gf_fmode != MCAST_EXCLUDE)
460 return -EINVAL;
461
462 rcu_read_lock();
463 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
464
465 if (!idev) {
466 rcu_read_unlock();
467 return -ENODEV;
468 }
469
470 err = 0;
471
472 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
473 leavegroup = 1;
474 goto done;
475 }
476
477 for_each_pmc_rcu(inet6, pmc) {
478 if (pmc->ifindex != gsf->gf_interface)
479 continue;
480 if (ipv6_addr_equal(&pmc->addr, group))
481 break;
482 }
483 if (!pmc) { /* must have a prior join */
484 err = -EINVAL;
485 goto done;
486 }
487 if (gsf->gf_numsrc) {
488 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
489 GFP_ATOMIC);
490 if (!newpsl) {
491 err = -ENOBUFS;
492 goto done;
493 }
494 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
495 for (i = 0; i < newpsl->sl_count; ++i) {
496 struct sockaddr_in6 *psin6;
497
498 psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
499 newpsl->sl_addr[i] = psin6->sin6_addr;
500 }
501 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
502 newpsl->sl_count, newpsl->sl_addr, 0);
503 if (err) {
504 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
505 goto done;
506 }
507 } else {
508 newpsl = NULL;
509 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
510 }
511
512 write_lock(&pmc->sflock);
513 psl = pmc->sflist;
514 if (psl) {
515 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
516 psl->sl_count, psl->sl_addr, 0);
517 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
518 } else
519 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
520 pmc->sflist = newpsl;
521 pmc->sfmode = gsf->gf_fmode;
522 write_unlock(&pmc->sflock);
523 err = 0;
524done:
525 read_unlock_bh(&idev->lock);
526 rcu_read_unlock();
527 if (leavegroup)
528 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
529 return err;
530}
531
532int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
533 struct group_filter __user *optval, int __user *optlen)
534{
535 int err, i, count, copycount;
536 const struct in6_addr *group;
537 struct ipv6_mc_socklist *pmc;
538 struct inet6_dev *idev;
539 struct ipv6_pinfo *inet6 = inet6_sk(sk);
540 struct ip6_sf_socklist *psl;
541 struct net *net = sock_net(sk);
542
543 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
544
545 if (!ipv6_addr_is_multicast(group))
546 return -EINVAL;
547
548 rcu_read_lock();
549 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
550
551 if (!idev) {
552 rcu_read_unlock();
553 return -ENODEV;
554 }
555
556 err = -EADDRNOTAVAIL;
557 /* changes to the ipv6_mc_list require the socket lock and
558 * rtnl lock. We have the socket lock and rcu read lock,
559 * so reading the list is safe.
560 */
561
562 for_each_pmc_rcu(inet6, pmc) {
563 if (pmc->ifindex != gsf->gf_interface)
564 continue;
565 if (ipv6_addr_equal(group, &pmc->addr))
566 break;
567 }
568 if (!pmc) /* must have a prior join */
569 goto done;
570 gsf->gf_fmode = pmc->sfmode;
571 psl = pmc->sflist;
572 count = psl ? psl->sl_count : 0;
573 read_unlock_bh(&idev->lock);
574 rcu_read_unlock();
575
576 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
577 gsf->gf_numsrc = count;
578 if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
579 copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
580 return -EFAULT;
581 }
582 /* changes to psl require the socket lock, and a write lock
583 * on pmc->sflock. We have the socket lock so reading here is safe.
584 */
585 for (i = 0; i < copycount; i++) {
586 struct sockaddr_in6 *psin6;
587 struct sockaddr_storage ss;
588
589 psin6 = (struct sockaddr_in6 *)&ss;
590 memset(&ss, 0, sizeof(ss));
591 psin6->sin6_family = AF_INET6;
592 psin6->sin6_addr = psl->sl_addr[i];
593 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
594 return -EFAULT;
595 }
596 return 0;
597done:
598 read_unlock_bh(&idev->lock);
599 rcu_read_unlock();
600 return err;
601}
602
603bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
604 const struct in6_addr *src_addr)
605{
606 struct ipv6_pinfo *np = inet6_sk(sk);
607 struct ipv6_mc_socklist *mc;
608 struct ip6_sf_socklist *psl;
609 bool rv = true;
610
611 rcu_read_lock();
612 for_each_pmc_rcu(np, mc) {
613 if (ipv6_addr_equal(&mc->addr, mc_addr))
614 break;
615 }
616 if (!mc) {
617 rcu_read_unlock();
618 return true;
619 }
620 read_lock(&mc->sflock);
621 psl = mc->sflist;
622 if (!psl) {
623 rv = mc->sfmode == MCAST_EXCLUDE;
624 } else {
625 int i;
626
627 for (i = 0; i < psl->sl_count; i++) {
628 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
629 break;
630 }
631 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
632 rv = false;
633 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
634 rv = false;
635 }
636 read_unlock(&mc->sflock);
637 rcu_read_unlock();
638
639 return rv;
640}
641
642static void igmp6_group_added(struct ifmcaddr6 *mc)
643{
644 struct net_device *dev = mc->idev->dev;
645 char buf[MAX_ADDR_LEN];
646
647 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
648 IPV6_ADDR_SCOPE_LINKLOCAL)
649 return;
650
651 spin_lock_bh(&mc->mca_lock);
652 if (!(mc->mca_flags&MAF_LOADED)) {
653 mc->mca_flags |= MAF_LOADED;
654 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
655 dev_mc_add(dev, buf);
656 }
657 spin_unlock_bh(&mc->mca_lock);
658
659 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
660 return;
661
662 if (mld_in_v1_mode(mc->idev)) {
663 igmp6_join_group(mc);
664 return;
665 }
666 /* else v2 */
667
668 mc->mca_crcount = mc->idev->mc_qrv;
669 mld_ifc_event(mc->idev);
670}
671
672static void igmp6_group_dropped(struct ifmcaddr6 *mc)
673{
674 struct net_device *dev = mc->idev->dev;
675 char buf[MAX_ADDR_LEN];
676
677 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
678 IPV6_ADDR_SCOPE_LINKLOCAL)
679 return;
680
681 spin_lock_bh(&mc->mca_lock);
682 if (mc->mca_flags&MAF_LOADED) {
683 mc->mca_flags &= ~MAF_LOADED;
684 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
685 dev_mc_del(dev, buf);
686 }
687
688 if (mc->mca_flags & MAF_NOREPORT)
689 goto done;
690 spin_unlock_bh(&mc->mca_lock);
691
692 if (!mc->idev->dead)
693 igmp6_leave_group(mc);
694
695 spin_lock_bh(&mc->mca_lock);
696 if (del_timer(&mc->mca_timer))
697 atomic_dec(&mc->mca_refcnt);
698done:
699 ip6_mc_clear_src(mc);
700 spin_unlock_bh(&mc->mca_lock);
701}
702
703/*
704 * deleted ifmcaddr6 manipulation
705 */
706static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
707{
708 struct ifmcaddr6 *pmc;
709
710 /* this is an "ifmcaddr6" for convenience; only the fields below
711 * are actually used. In particular, the refcnt and users are not
712 * used for management of the delete list. Using the same structure
713 * for deleted items allows change reports to use common code with
714 * non-deleted or query-response MCA's.
715 */
716 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
717 if (!pmc)
718 return;
719
720 spin_lock_bh(&im->mca_lock);
721 spin_lock_init(&pmc->mca_lock);
722 pmc->idev = im->idev;
723 in6_dev_hold(idev);
724 pmc->mca_addr = im->mca_addr;
725 pmc->mca_crcount = idev->mc_qrv;
726 pmc->mca_sfmode = im->mca_sfmode;
727 if (pmc->mca_sfmode == MCAST_INCLUDE) {
728 struct ip6_sf_list *psf;
729
730 pmc->mca_tomb = im->mca_tomb;
731 pmc->mca_sources = im->mca_sources;
732 im->mca_tomb = im->mca_sources = NULL;
733 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
734 psf->sf_crcount = pmc->mca_crcount;
735 }
736 spin_unlock_bh(&im->mca_lock);
737
738 spin_lock_bh(&idev->mc_lock);
739 pmc->next = idev->mc_tomb;
740 idev->mc_tomb = pmc;
741 spin_unlock_bh(&idev->mc_lock);
742}
743
744static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
745{
746 struct ifmcaddr6 *pmc, *pmc_prev;
747 struct ip6_sf_list *psf, *psf_next;
748
749 spin_lock_bh(&idev->mc_lock);
750 pmc_prev = NULL;
751 for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
752 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
753 break;
754 pmc_prev = pmc;
755 }
756 if (pmc) {
757 if (pmc_prev)
758 pmc_prev->next = pmc->next;
759 else
760 idev->mc_tomb = pmc->next;
761 }
762 spin_unlock_bh(&idev->mc_lock);
763
764 if (pmc) {
765 for (psf = pmc->mca_tomb; psf; psf = psf_next) {
766 psf_next = psf->sf_next;
767 kfree(psf);
768 }
769 in6_dev_put(pmc->idev);
770 kfree(pmc);
771 }
772}
773
774static void mld_clear_delrec(struct inet6_dev *idev)
775{
776 struct ifmcaddr6 *pmc, *nextpmc;
777
778 spin_lock_bh(&idev->mc_lock);
779 pmc = idev->mc_tomb;
780 idev->mc_tomb = NULL;
781 spin_unlock_bh(&idev->mc_lock);
782
783 for (; pmc; pmc = nextpmc) {
784 nextpmc = pmc->next;
785 ip6_mc_clear_src(pmc);
786 in6_dev_put(pmc->idev);
787 kfree(pmc);
788 }
789
790 /* clear dead sources, too */
791 read_lock_bh(&idev->lock);
792 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
793 struct ip6_sf_list *psf, *psf_next;
794
795 spin_lock_bh(&pmc->mca_lock);
796 psf = pmc->mca_tomb;
797 pmc->mca_tomb = NULL;
798 spin_unlock_bh(&pmc->mca_lock);
799 for (; psf; psf = psf_next) {
800 psf_next = psf->sf_next;
801 kfree(psf);
802 }
803 }
804 read_unlock_bh(&idev->lock);
805}
806
807static void mca_get(struct ifmcaddr6 *mc)
808{
809 atomic_inc(&mc->mca_refcnt);
810}
811
812static void ma_put(struct ifmcaddr6 *mc)
813{
814 if (atomic_dec_and_test(&mc->mca_refcnt)) {
815 in6_dev_put(mc->idev);
816 kfree(mc);
817 }
818}
819
820static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
821 const struct in6_addr *addr)
822{
823 struct ifmcaddr6 *mc;
824
825 mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
826 if (!mc)
827 return NULL;
828
829 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
830
831 mc->mca_addr = *addr;
832 mc->idev = idev; /* reference taken by caller */
833 mc->mca_users = 1;
834 /* mca_stamp should be updated upon changes */
835 mc->mca_cstamp = mc->mca_tstamp = jiffies;
836 atomic_set(&mc->mca_refcnt, 1);
837 spin_lock_init(&mc->mca_lock);
838
839 /* initial mode is (EX, empty) */
840 mc->mca_sfmode = MCAST_EXCLUDE;
841 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
842
843 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
844 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
845 mc->mca_flags |= MAF_NOREPORT;
846
847 return mc;
848}
849
850/*
851 * device multicast group inc (add if not found)
852 */
853int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
854{
855 struct ifmcaddr6 *mc;
856 struct inet6_dev *idev;
857
858 ASSERT_RTNL();
859
860 /* we need to take a reference on idev */
861 idev = in6_dev_get(dev);
862
863 if (!idev)
864 return -EINVAL;
865
866 write_lock_bh(&idev->lock);
867 if (idev->dead) {
868 write_unlock_bh(&idev->lock);
869 in6_dev_put(idev);
870 return -ENODEV;
871 }
872
873 for (mc = idev->mc_list; mc; mc = mc->next) {
874 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
875 mc->mca_users++;
876 write_unlock_bh(&idev->lock);
877 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
878 NULL, 0);
879 in6_dev_put(idev);
880 return 0;
881 }
882 }
883
884 mc = mca_alloc(idev, addr);
885 if (!mc) {
886 write_unlock_bh(&idev->lock);
887 in6_dev_put(idev);
888 return -ENOMEM;
889 }
890
891 mc->next = idev->mc_list;
892 idev->mc_list = mc;
893
894 /* Hold this for the code below before we unlock,
895 * it is already exposed via idev->mc_list.
896 */
897 mca_get(mc);
898 write_unlock_bh(&idev->lock);
899
900 mld_del_delrec(idev, &mc->mca_addr);
901 igmp6_group_added(mc);
902 ma_put(mc);
903 return 0;
904}
905
906/*
907 * device multicast group del
908 */
909int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
910{
911 struct ifmcaddr6 *ma, **map;
912
913 ASSERT_RTNL();
914
915 write_lock_bh(&idev->lock);
916 for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
917 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
918 if (--ma->mca_users == 0) {
919 *map = ma->next;
920 write_unlock_bh(&idev->lock);
921
922 igmp6_group_dropped(ma);
923
924 ma_put(ma);
925 return 0;
926 }
927 write_unlock_bh(&idev->lock);
928 return 0;
929 }
930 }
931 write_unlock_bh(&idev->lock);
932
933 return -ENOENT;
934}
935
936int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
937{
938 struct inet6_dev *idev;
939 int err;
940
941 ASSERT_RTNL();
942
943 idev = __in6_dev_get(dev);
944 if (!idev)
945 err = -ENODEV;
946 else
947 err = __ipv6_dev_mc_dec(idev, addr);
948
949 return err;
950}
951
952/*
953 * check if the interface/address pair is valid
954 */
955bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
956 const struct in6_addr *src_addr)
957{
958 struct inet6_dev *idev;
959 struct ifmcaddr6 *mc;
960 bool rv = false;
961
962 rcu_read_lock();
963 idev = __in6_dev_get(dev);
964 if (idev) {
965 read_lock_bh(&idev->lock);
966 for (mc = idev->mc_list; mc; mc = mc->next) {
967 if (ipv6_addr_equal(&mc->mca_addr, group))
968 break;
969 }
970 if (mc) {
971 if (src_addr && !ipv6_addr_any(src_addr)) {
972 struct ip6_sf_list *psf;
973
974 spin_lock_bh(&mc->mca_lock);
975 for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
976 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
977 break;
978 }
979 if (psf)
980 rv = psf->sf_count[MCAST_INCLUDE] ||
981 psf->sf_count[MCAST_EXCLUDE] !=
982 mc->mca_sfcount[MCAST_EXCLUDE];
983 else
984 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
985 spin_unlock_bh(&mc->mca_lock);
986 } else
987 rv = true; /* don't filter unspecified source */
988 }
989 read_unlock_bh(&idev->lock);
990 }
991 rcu_read_unlock();
992 return rv;
993}
994
995static void mld_gq_start_timer(struct inet6_dev *idev)
996{
997 unsigned long tv = prandom_u32() % idev->mc_maxdelay;
998
999 idev->mc_gq_running = 1;
1000 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1001 in6_dev_hold(idev);
1002}
1003
1004static void mld_gq_stop_timer(struct inet6_dev *idev)
1005{
1006 idev->mc_gq_running = 0;
1007 if (del_timer(&idev->mc_gq_timer))
1008 __in6_dev_put(idev);
1009}
1010
1011static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1012{
1013 unsigned long tv = prandom_u32() % delay;
1014
1015 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1016 in6_dev_hold(idev);
1017}
1018
1019static void mld_ifc_stop_timer(struct inet6_dev *idev)
1020{
1021 idev->mc_ifc_count = 0;
1022 if (del_timer(&idev->mc_ifc_timer))
1023 __in6_dev_put(idev);
1024}
1025
1026static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1027{
1028 unsigned long tv = prandom_u32() % delay;
1029
1030 if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1031 in6_dev_hold(idev);
1032}
1033
1034static void mld_dad_stop_timer(struct inet6_dev *idev)
1035{
1036 if (del_timer(&idev->mc_dad_timer))
1037 __in6_dev_put(idev);
1038}
1039
1040/*
1041 * IGMP handling (alias multicast ICMPv6 messages)
1042 */
1043
1044static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1045{
1046 unsigned long delay = resptime;
1047
1048 /* Do not start timer for these addresses */
1049 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1050 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1051 return;
1052
1053 if (del_timer(&ma->mca_timer)) {
1054 atomic_dec(&ma->mca_refcnt);
1055 delay = ma->mca_timer.expires - jiffies;
1056 }
1057
1058 if (delay >= resptime)
1059 delay = prandom_u32() % resptime;
1060
1061 ma->mca_timer.expires = jiffies + delay;
1062 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1063 atomic_inc(&ma->mca_refcnt);
1064 ma->mca_flags |= MAF_TIMER_RUNNING;
1065}
1066
1067/* mark EXCLUDE-mode sources */
1068static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1069 const struct in6_addr *srcs)
1070{
1071 struct ip6_sf_list *psf;
1072 int i, scount;
1073
1074 scount = 0;
1075 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1076 if (scount == nsrcs)
1077 break;
1078 for (i = 0; i < nsrcs; i++) {
1079 /* skip inactive filters */
1080 if (psf->sf_count[MCAST_INCLUDE] ||
1081 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1082 psf->sf_count[MCAST_EXCLUDE])
1083 break;
1084 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1085 scount++;
1086 break;
1087 }
1088 }
1089 }
1090 pmc->mca_flags &= ~MAF_GSQUERY;
1091 if (scount == nsrcs) /* all sources excluded */
1092 return false;
1093 return true;
1094}
1095
1096static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1097 const struct in6_addr *srcs)
1098{
1099 struct ip6_sf_list *psf;
1100 int i, scount;
1101
1102 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1103 return mld_xmarksources(pmc, nsrcs, srcs);
1104
1105 /* mark INCLUDE-mode sources */
1106
1107 scount = 0;
1108 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1109 if (scount == nsrcs)
1110 break;
1111 for (i = 0; i < nsrcs; i++) {
1112 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1113 psf->sf_gsresp = 1;
1114 scount++;
1115 break;
1116 }
1117 }
1118 }
1119 if (!scount) {
1120 pmc->mca_flags &= ~MAF_GSQUERY;
1121 return false;
1122 }
1123 pmc->mca_flags |= MAF_GSQUERY;
1124 return true;
1125}
1126
1127static int mld_force_mld_version(const struct inet6_dev *idev)
1128{
1129 /* Normally, both are 0 here. If enforcement to a particular is
1130 * being used, individual device enforcement will have a lower
1131 * precedence over 'all' device (.../conf/all/force_mld_version).
1132 */
1133
1134 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1135 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1136 else
1137 return idev->cnf.force_mld_version;
1138}
1139
1140static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1141{
1142 return mld_force_mld_version(idev) == 2;
1143}
1144
1145static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1146{
1147 return mld_force_mld_version(idev) == 1;
1148}
1149
1150static bool mld_in_v1_mode(const struct inet6_dev *idev)
1151{
1152 if (mld_in_v2_mode_only(idev))
1153 return false;
1154 if (mld_in_v1_mode_only(idev))
1155 return true;
1156 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1157 return true;
1158
1159 return false;
1160}
1161
1162static void mld_set_v1_mode(struct inet6_dev *idev)
1163{
1164 /* RFC3810, relevant sections:
1165 * - 9.1. Robustness Variable
1166 * - 9.2. Query Interval
1167 * - 9.3. Query Response Interval
1168 * - 9.12. Older Version Querier Present Timeout
1169 */
1170 unsigned long switchback;
1171
1172 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1173
1174 idev->mc_v1_seen = jiffies + switchback;
1175}
1176
1177static void mld_update_qrv(struct inet6_dev *idev,
1178 const struct mld2_query *mlh2)
1179{
1180 /* RFC3810, relevant sections:
1181 * - 5.1.8. QRV (Querier's Robustness Variable)
1182 * - 9.1. Robustness Variable
1183 */
1184
1185 /* The value of the Robustness Variable MUST NOT be zero,
1186 * and SHOULD NOT be one. Catch this here if we ever run
1187 * into such a case in future.
1188 */
1189 const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1190 WARN_ON(idev->mc_qrv == 0);
1191
1192 if (mlh2->mld2q_qrv > 0)
1193 idev->mc_qrv = mlh2->mld2q_qrv;
1194
1195 if (unlikely(idev->mc_qrv < min_qrv)) {
1196 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1197 idev->mc_qrv, min_qrv);
1198 idev->mc_qrv = min_qrv;
1199 }
1200}
1201
1202static void mld_update_qi(struct inet6_dev *idev,
1203 const struct mld2_query *mlh2)
1204{
1205 /* RFC3810, relevant sections:
1206 * - 5.1.9. QQIC (Querier's Query Interval Code)
1207 * - 9.2. Query Interval
1208 * - 9.12. Older Version Querier Present Timeout
1209 * (the [Query Interval] in the last Query received)
1210 */
1211 unsigned long mc_qqi;
1212
1213 if (mlh2->mld2q_qqic < 128) {
1214 mc_qqi = mlh2->mld2q_qqic;
1215 } else {
1216 unsigned long mc_man, mc_exp;
1217
1218 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1219 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1220
1221 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1222 }
1223
1224 idev->mc_qi = mc_qqi * HZ;
1225}
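
/* Editorial sketch (added, not part of the original file): the QQIC
 * decoding above (RFC 3810, 5.1.9) written out as a tiny unused helper.
 * Values below 128 are taken literally (in seconds); larger values use a
 * 3-bit exponent and 4-bit mantissa from <net/mld.h>.
 */
static inline unsigned long __maybe_unused mld_qqic_to_secs_sketch(u8 qqic)
{
	if (qqic < 128)
		return qqic;
	return (MLDV2_QQIC_MAN(qqic) | 0x10) << (MLDV2_QQIC_EXP(qqic) + 3);
}
/* e.g. qqic == 125  -> 125 seconds (linear range)
 *      qqic == 0x89 -> (0x9 | 0x10) << (0 + 3) = 200 seconds
 */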
1226
1227static void mld_update_qri(struct inet6_dev *idev,
1228 const struct mld2_query *mlh2)
1229{
1230 /* RFC3810, relevant sections:
1231 * - 5.1.3. Maximum Response Code
1232 * - 9.3. Query Response Interval
1233 */
1234 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1235}
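
/* Editorial note (added): mldv2_mrc() from <net/mld.h> decodes the Maximum
 * Response Code the same way, linear below 32768 and mantissa/exponent
 * above, and yields milliseconds (RFC 3810, 5.1.3), hence the
 * msecs_to_jiffies() conversion here.
 */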
1236
1237static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1238 unsigned long *max_delay, bool v1_query)
1239{
1240 unsigned long mldv1_md;
1241
1242 /* Ignore v1 queries */
1243 if (mld_in_v2_mode_only(idev))
1244 return -EINVAL;
1245
1246 mldv1_md = ntohs(mld->mld_maxdelay);
1247
	/* When we are in MLDv1 fallback and an MLDv2 router starts up
	 * unaware of the current MLDv1 operation, the MRC == MRD mapping
1250 * only works when the exponential algorithm is not being
1251 * used (as MLDv1 is unaware of such things).
1252 *
1253 * According to the RFC author, the MLDv2 implementations
1254 * he's aware of all use a MRC < 32768 on start up queries.
1255 *
1256 * Thus, should we *ever* encounter something else larger
1257 * than that, just assume the maximum possible within our
1258 * reach.
1259 */
1260 if (!v1_query)
1261 mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1262
1263 *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1264
1265 /* MLDv1 router present: we need to go into v1 mode *only*
1266 * when an MLDv1 query is received as per section 9.12. of
1267 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1268 * queries MUST be of exactly 24 octets.
1269 */
1270 if (v1_query)
1271 mld_set_v1_mode(idev);
1272
1273 /* cancel MLDv2 report timer */
1274 mld_gq_stop_timer(idev);
1275 /* cancel the interface change timer */
1276 mld_ifc_stop_timer(idev);
1277 /* clear deleted report items */
1278 mld_clear_delrec(idev);
1279
1280 return 0;
1281}
1282
1283static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1284 unsigned long *max_delay)
1285{
1286 *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1287
1288 mld_update_qrv(idev, mld);
1289 mld_update_qi(idev, mld);
1290 mld_update_qri(idev, mld);
1291
1292 idev->mc_maxdelay = *max_delay;
1293
1294 return 0;
1295}
1296
1297/* called with rcu_read_lock() */
1298int igmp6_event_query(struct sk_buff *skb)
1299{
1300 struct mld2_query *mlh2 = NULL;
1301 struct ifmcaddr6 *ma;
1302 const struct in6_addr *group;
1303 unsigned long max_delay;
1304 struct inet6_dev *idev;
1305 struct mld_msg *mld;
1306 int group_type;
1307 int mark = 0;
1308 int len, err;
1309
1310 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1311 return -EINVAL;
1312
1313 /* compute payload length excluding extension headers */
1314 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1315 len -= skb_network_header_len(skb);
1316
1317 /* RFC3810 6.2
1318 * Upon reception of an MLD message that contains a Query, the node
1319 * checks if the source address of the message is a valid link-local
1320 * address, if the Hop Limit is set to 1, and if the Router Alert
1321 * option is present in the Hop-By-Hop Options header of the IPv6
1322 * packet. If any of these checks fails, the packet is dropped.
1323 */
1324 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1325 ipv6_hdr(skb)->hop_limit != 1 ||
1326 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1327 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1328 return -EINVAL;
1329
1330 idev = __in6_dev_get(skb->dev);
1331 if (!idev)
1332 return 0;
1333
1334 mld = (struct mld_msg *)icmp6_hdr(skb);
1335 group = &mld->mld_mca;
1336 group_type = ipv6_addr_type(group);
1337
1338 if (group_type != IPV6_ADDR_ANY &&
1339 !(group_type&IPV6_ADDR_MULTICAST))
1340 return -EINVAL;
1341
1342 if (len < MLD_V1_QUERY_LEN) {
1343 return -EINVAL;
1344 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1345 err = mld_process_v1(idev, mld, &max_delay,
1346 len == MLD_V1_QUERY_LEN);
1347 if (err < 0)
1348 return err;
1349 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1350 int srcs_offset = sizeof(struct mld2_query) -
1351 sizeof(struct icmp6hdr);
1352
1353 if (!pskb_may_pull(skb, srcs_offset))
1354 return -EINVAL;
1355
1356 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1357
1358 err = mld_process_v2(idev, mlh2, &max_delay);
1359 if (err < 0)
1360 return err;
1361
1362 if (group_type == IPV6_ADDR_ANY) { /* general query */
1363 if (mlh2->mld2q_nsrcs)
1364 return -EINVAL; /* no sources allowed */
1365
1366 mld_gq_start_timer(idev);
1367 return 0;
1368 }
1369 /* mark sources to include, if group & source-specific */
1370 if (mlh2->mld2q_nsrcs != 0) {
1371 if (!pskb_may_pull(skb, srcs_offset +
1372 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1373 return -EINVAL;
1374
1375 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1376 mark = 1;
1377 }
1378 } else {
1379 return -EINVAL;
1380 }
1381
1382 read_lock_bh(&idev->lock);
1383 if (group_type == IPV6_ADDR_ANY) {
1384 for (ma = idev->mc_list; ma; ma = ma->next) {
1385 spin_lock_bh(&ma->mca_lock);
1386 igmp6_group_queried(ma, max_delay);
1387 spin_unlock_bh(&ma->mca_lock);
1388 }
1389 } else {
1390 for (ma = idev->mc_list; ma; ma = ma->next) {
1391 if (!ipv6_addr_equal(group, &ma->mca_addr))
1392 continue;
1393 spin_lock_bh(&ma->mca_lock);
1394 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1395 /* gsquery <- gsquery && mark */
1396 if (!mark)
1397 ma->mca_flags &= ~MAF_GSQUERY;
1398 } else {
1399 /* gsquery <- mark */
1400 if (mark)
1401 ma->mca_flags |= MAF_GSQUERY;
1402 else
1403 ma->mca_flags &= ~MAF_GSQUERY;
1404 }
1405 if (!(ma->mca_flags & MAF_GSQUERY) ||
1406 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1407 igmp6_group_queried(ma, max_delay);
1408 spin_unlock_bh(&ma->mca_lock);
1409 break;
1410 }
1411 }
1412 read_unlock_bh(&idev->lock);
1413
1414 return 0;
1415}
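
/* Editorial note (added) on the length dispatch above: with the definitions
 * at the top of this file, a 24-octet ICMPv6 payload is treated as an MLDv1
 * query, anything from 28 octets up as an MLDv2 query (28 bytes of fixed
 * header plus 16 bytes per listed source), and anything shorter than 24
 * octets is rejected with -EINVAL. An interface already in MLDv1
 * compatibility mode funnels longer queries through the v1 handler as well
 * (with v1_query == false).
 */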
1416
1417/* called with rcu_read_lock() */
1418int igmp6_event_report(struct sk_buff *skb)
1419{
1420 struct ifmcaddr6 *ma;
1421 struct inet6_dev *idev;
1422 struct mld_msg *mld;
1423 int addr_type;
1424
1425 /* Our own report looped back. Ignore it. */
1426 if (skb->pkt_type == PACKET_LOOPBACK)
1427 return 0;
1428
1429 /* send our report if the MC router may not have heard this report */
1430 if (skb->pkt_type != PACKET_MULTICAST &&
1431 skb->pkt_type != PACKET_BROADCAST)
1432 return 0;
1433
1434 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1435 return -EINVAL;
1436
1437 mld = (struct mld_msg *)icmp6_hdr(skb);
1438
	/* Drop reports whose source is neither link-local nor unspecified */
1440 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1441 if (addr_type != IPV6_ADDR_ANY &&
1442 !(addr_type&IPV6_ADDR_LINKLOCAL))
1443 return -EINVAL;
1444
1445 idev = __in6_dev_get(skb->dev);
1446 if (!idev)
1447 return -ENODEV;
1448
1449 /*
1450 * Cancel the timer for this group
1451 */
1452
1453 read_lock_bh(&idev->lock);
1454 for (ma = idev->mc_list; ma; ma = ma->next) {
1455 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1456 spin_lock(&ma->mca_lock);
1457 if (del_timer(&ma->mca_timer))
1458 atomic_dec(&ma->mca_refcnt);
1459 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1460 spin_unlock(&ma->mca_lock);
1461 break;
1462 }
1463 }
1464 read_unlock_bh(&idev->lock);
1465 return 0;
1466}
1467
1468static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1469 int gdeleted, int sdeleted)
1470{
1471 switch (type) {
1472 case MLD2_MODE_IS_INCLUDE:
1473 case MLD2_MODE_IS_EXCLUDE:
1474 if (gdeleted || sdeleted)
1475 return false;
1476 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1477 if (pmc->mca_sfmode == MCAST_INCLUDE)
1478 return true;
1479 /* don't include if this source is excluded
1480 * in all filters
1481 */
1482 if (psf->sf_count[MCAST_INCLUDE])
1483 return type == MLD2_MODE_IS_INCLUDE;
1484 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1485 psf->sf_count[MCAST_EXCLUDE];
1486 }
1487 return false;
1488 case MLD2_CHANGE_TO_INCLUDE:
1489 if (gdeleted || sdeleted)
1490 return false;
1491 return psf->sf_count[MCAST_INCLUDE] != 0;
1492 case MLD2_CHANGE_TO_EXCLUDE:
1493 if (gdeleted || sdeleted)
1494 return false;
1495 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1496 psf->sf_count[MCAST_INCLUDE])
1497 return false;
1498 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1499 psf->sf_count[MCAST_EXCLUDE];
1500 case MLD2_ALLOW_NEW_SOURCES:
1501 if (gdeleted || !psf->sf_crcount)
1502 return false;
1503 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1504 case MLD2_BLOCK_OLD_SOURCES:
1505 if (pmc->mca_sfmode == MCAST_INCLUDE)
1506 return gdeleted || (psf->sf_crcount && sdeleted);
1507 return psf->sf_crcount && !gdeleted && !sdeleted;
1508 }
1509 return false;
1510}
1511
1512static int
1513mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1514{
1515 struct ip6_sf_list *psf;
1516 int scount = 0;
1517
1518 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1519 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1520 continue;
1521 scount++;
1522 }
1523 return scount;
1524}
1525
1526static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1527 struct net_device *dev,
1528 const struct in6_addr *saddr,
1529 const struct in6_addr *daddr,
1530 int proto, int len)
1531{
1532 struct ipv6hdr *hdr;
1533
1534 skb->protocol = htons(ETH_P_IPV6);
1535 skb->dev = dev;
1536
1537 skb_reset_network_header(skb);
1538 skb_put(skb, sizeof(struct ipv6hdr));
1539 hdr = ipv6_hdr(skb);
1540
1541 ip6_flow_hdr(hdr, 0, 0);
1542
1543 hdr->payload_len = htons(len);
1544 hdr->nexthdr = proto;
1545 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1546
1547 hdr->saddr = *saddr;
1548 hdr->daddr = *daddr;
1549}
1550
1551static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1552{
1553 struct net_device *dev = idev->dev;
1554 struct net *net = dev_net(dev);
1555 struct sock *sk = net->ipv6.igmp_sk;
1556 struct sk_buff *skb;
1557 struct mld2_report *pmr;
1558 struct in6_addr addr_buf;
1559 const struct in6_addr *saddr;
1560 int hlen = LL_RESERVED_SPACE(dev);
1561 int tlen = dev->needed_tailroom;
1562 unsigned int size = mtu + hlen + tlen;
1563 int err;
1564 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1565 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1566 IPV6_TLV_PADN, 0 };
1567
1568 /* we assume size > sizeof(ra) here */
1569 /* limit our allocations to order-0 page */
1570 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1571 skb = sock_alloc_send_skb(sk, size, 1, &err);
1572
1573 if (!skb)
1574 return NULL;
1575
1576 skb->priority = TC_PRIO_CONTROL;
1577 skb_reserve(skb, hlen);
1578 skb_tailroom_reserve(skb, mtu, tlen);
1579
1580 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1581 /* <draft-ietf-magma-mld-source-05.txt>:
1582 * use unspecified address as the source address
1583 * when a valid link-local address is not available.
1584 */
1585 saddr = &in6addr_any;
1586 } else
1587 saddr = &addr_buf;
1588
1589 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1590
1591 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1592
1593 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1594 skb_put(skb, sizeof(*pmr));
1595 pmr = (struct mld2_report *)skb_transport_header(skb);
1596 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1597 pmr->mld2r_resv1 = 0;
1598 pmr->mld2r_cksum = 0;
1599 pmr->mld2r_resv2 = 0;
1600 pmr->mld2r_ngrec = 0;
1601 return skb;
1602}
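
/* Editorial note (added): the ra[] template above is the 8-byte Hop-by-Hop
 * options header that MLD messages must carry: Next Header = ICMPv6,
 * Hdr Ext Len = 0 (8 octets total), a Router Alert option (type 5,
 * length 2) whose value 0 means "MLD message" (RFC 2711), and a PadN
 * option filling the remaining two octets.
 */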
1603
1604static void mld_sendpack(struct sk_buff *skb)
1605{
1606 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1607 struct mld2_report *pmr =
1608 (struct mld2_report *)skb_transport_header(skb);
1609 int payload_len, mldlen;
1610 struct inet6_dev *idev;
1611 struct net *net = dev_net(skb->dev);
1612 int err;
1613 struct flowi6 fl6;
1614 struct dst_entry *dst;
1615
1616 rcu_read_lock();
1617 idev = __in6_dev_get(skb->dev);
1618 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1619
1620 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1621 sizeof(*pip6);
1622 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1623 pip6->payload_len = htons(payload_len);
1624
1625 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1626 IPPROTO_ICMPV6,
1627 csum_partial(skb_transport_header(skb),
1628 mldlen, 0));
1629
1630 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1631 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1632 skb->dev->ifindex);
1633 dst = icmp6_dst_alloc(skb->dev, &fl6);
1634
1635 err = 0;
1636 if (IS_ERR(dst)) {
1637 err = PTR_ERR(dst);
1638 dst = NULL;
1639 }
1640 skb_dst_set(skb, dst);
1641 if (err)
1642 goto err_out;
1643
1644 payload_len = skb->len;
1645
1646 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1647 net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1648 dst_output);
1649out:
1650 if (!err) {
1651 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1652 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1653 } else {
1654 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1655 }
1656
1657 rcu_read_unlock();
1658 return;
1659
1660err_out:
1661 kfree_skb(skb);
1662 goto out;
1663}
1664
1665static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1666{
1667 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1668}
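
/* Editorial example (added): struct mld2_grec is 20 bytes (record type,
 * aux data len, source count and the 128-bit group address), so a record
 * carrying three sources needs 20 + 3 * 16 = 68 bytes in the report.
 */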
1669
1670static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1671 int type, struct mld2_grec **ppgr)
1672{
1673 struct net_device *dev = pmc->idev->dev;
1674 struct mld2_report *pmr;
1675 struct mld2_grec *pgr;
1676
1677 if (!skb)
1678 skb = mld_newpack(pmc->idev, dev->mtu);
1679 if (!skb)
1680 return NULL;
1681 pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
1682 pgr->grec_type = type;
1683 pgr->grec_auxwords = 0;
1684 pgr->grec_nsrcs = 0;
1685 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1686 pmr = (struct mld2_report *)skb_transport_header(skb);
1687 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1688 *ppgr = pgr;
1689 return skb;
1690}
1691
1692#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1693
1694static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1695 int type, int gdeleted, int sdeleted, int crsend)
1696{
1697 struct inet6_dev *idev = pmc->idev;
1698 struct net_device *dev = idev->dev;
1699 struct mld2_report *pmr;
1700 struct mld2_grec *pgr = NULL;
1701 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1702 int scount, stotal, first, isquery, truncate;
1703
1704 if (pmc->mca_flags & MAF_NOREPORT)
1705 return skb;
1706
1707 isquery = type == MLD2_MODE_IS_INCLUDE ||
1708 type == MLD2_MODE_IS_EXCLUDE;
1709 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1710 type == MLD2_CHANGE_TO_EXCLUDE;
1711
1712 stotal = scount = 0;
1713
1714 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1715
1716 if (!*psf_list)
1717 goto empty_source;
1718
1719 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1720
1721 /* EX and TO_EX get a fresh packet, if needed */
1722 if (truncate) {
1723 if (pmr && pmr->mld2r_ngrec &&
1724 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1725 if (skb)
1726 mld_sendpack(skb);
1727 skb = mld_newpack(idev, dev->mtu);
1728 }
1729 }
1730 first = 1;
1731 psf_prev = NULL;
1732 for (psf = *psf_list; psf; psf = psf_next) {
1733 struct in6_addr *psrc;
1734
1735 psf_next = psf->sf_next;
1736
1737 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
1738 psf_prev = psf;
1739 continue;
1740 }
1741
1742 /* clear marks on query responses */
1743 if (isquery)
1744 psf->sf_gsresp = 0;
1745
1746 if (AVAILABLE(skb) < sizeof(*psrc) +
1747 first*sizeof(struct mld2_grec)) {
1748 if (truncate && !first)
1749 break; /* truncate these */
1750 if (pgr)
1751 pgr->grec_nsrcs = htons(scount);
1752 if (skb)
1753 mld_sendpack(skb);
1754 skb = mld_newpack(idev, dev->mtu);
1755 first = 1;
1756 scount = 0;
1757 }
1758 if (first) {
1759 skb = add_grhead(skb, pmc, type, &pgr);
1760 first = 0;
1761 }
1762 if (!skb)
1763 return NULL;
1764 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1765 *psrc = psf->sf_addr;
1766 scount++; stotal++;
1767 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1768 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1769 psf->sf_crcount--;
1770 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1771 if (psf_prev)
1772 psf_prev->sf_next = psf->sf_next;
1773 else
1774 *psf_list = psf->sf_next;
1775 kfree(psf);
1776 continue;
1777 }
1778 }
1779 psf_prev = psf;
1780 }
1781
1782empty_source:
1783 if (!stotal) {
1784 if (type == MLD2_ALLOW_NEW_SOURCES ||
1785 type == MLD2_BLOCK_OLD_SOURCES)
1786 return skb;
1787 if (pmc->mca_crcount || isquery || crsend) {
1788 /* make sure we have room for group header */
1789 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1790 mld_sendpack(skb);
1791 skb = NULL; /* add_grhead will get a new one */
1792 }
1793 skb = add_grhead(skb, pmc, type, &pgr);
1794 }
1795 }
1796 if (pgr)
1797 pgr->grec_nsrcs = htons(scount);
1798
1799 if (isquery)
1800 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1801 return skb;
1802}
1803
1804static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1805{
1806 struct sk_buff *skb = NULL;
1807 int type;
1808
1809 read_lock_bh(&idev->lock);
1810 if (!pmc) {
1811 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1812 if (pmc->mca_flags & MAF_NOREPORT)
1813 continue;
1814 spin_lock_bh(&pmc->mca_lock);
1815 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1816 type = MLD2_MODE_IS_EXCLUDE;
1817 else
1818 type = MLD2_MODE_IS_INCLUDE;
1819 skb = add_grec(skb, pmc, type, 0, 0, 0);
1820 spin_unlock_bh(&pmc->mca_lock);
1821 }
1822 } else {
1823 spin_lock_bh(&pmc->mca_lock);
1824 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1825 type = MLD2_MODE_IS_EXCLUDE;
1826 else
1827 type = MLD2_MODE_IS_INCLUDE;
1828 skb = add_grec(skb, pmc, type, 0, 0, 0);
1829 spin_unlock_bh(&pmc->mca_lock);
1830 }
1831 read_unlock_bh(&idev->lock);
1832 if (skb)
1833 mld_sendpack(skb);
1834}
1835
1836/*
1837 * remove zero-count source records from a source filter list
1838 */
1839static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1840{
1841 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1842
1843 psf_prev = NULL;
1844 for (psf = *ppsf; psf; psf = psf_next) {
1845 psf_next = psf->sf_next;
1846 if (psf->sf_crcount == 0) {
1847 if (psf_prev)
1848 psf_prev->sf_next = psf->sf_next;
1849 else
1850 *ppsf = psf->sf_next;
1851 kfree(psf);
1852 } else
1853 psf_prev = psf;
1854 }
1855}
1856
1857static void mld_send_cr(struct inet6_dev *idev)
1858{
1859 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1860 struct sk_buff *skb = NULL;
1861 int type, dtype;
1862
1863 read_lock_bh(&idev->lock);
1864 spin_lock(&idev->mc_lock);
1865
1866 /* deleted MCA's */
1867 pmc_prev = NULL;
1868 for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
1869 pmc_next = pmc->next;
1870 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1871 type = MLD2_BLOCK_OLD_SOURCES;
1872 dtype = MLD2_BLOCK_OLD_SOURCES;
1873 skb = add_grec(skb, pmc, type, 1, 0, 0);
1874 skb = add_grec(skb, pmc, dtype, 1, 1, 0);
1875 }
1876 if (pmc->mca_crcount) {
1877 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1878 type = MLD2_CHANGE_TO_INCLUDE;
1879 skb = add_grec(skb, pmc, type, 1, 0, 0);
1880 }
1881 pmc->mca_crcount--;
1882 if (pmc->mca_crcount == 0) {
1883 mld_clear_zeros(&pmc->mca_tomb);
1884 mld_clear_zeros(&pmc->mca_sources);
1885 }
1886 }
1887 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1888 !pmc->mca_sources) {
1889 if (pmc_prev)
1890 pmc_prev->next = pmc_next;
1891 else
1892 idev->mc_tomb = pmc_next;
1893 in6_dev_put(pmc->idev);
1894 kfree(pmc);
1895 } else
1896 pmc_prev = pmc;
1897 }
1898 spin_unlock(&idev->mc_lock);
1899
1900 /* change recs */
1901 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1902 spin_lock_bh(&pmc->mca_lock);
1903 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1904 type = MLD2_BLOCK_OLD_SOURCES;
1905 dtype = MLD2_ALLOW_NEW_SOURCES;
1906 } else {
1907 type = MLD2_ALLOW_NEW_SOURCES;
1908 dtype = MLD2_BLOCK_OLD_SOURCES;
1909 }
1910 skb = add_grec(skb, pmc, type, 0, 0, 0);
1911 skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
1912
1913 /* filter mode changes */
1914 if (pmc->mca_crcount) {
1915 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1916 type = MLD2_CHANGE_TO_EXCLUDE;
1917 else
1918 type = MLD2_CHANGE_TO_INCLUDE;
1919 skb = add_grec(skb, pmc, type, 0, 0, 0);
1920 pmc->mca_crcount--;
1921 }
1922 spin_unlock_bh(&pmc->mca_lock);
1923 }
1924 read_unlock_bh(&idev->lock);
1925 if (!skb)
1926 return;
	mld_sendpack(skb);
1928}
1929
1930static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1931{
1932 struct net *net = dev_net(dev);
1933 struct sock *sk = net->ipv6.igmp_sk;
1934 struct inet6_dev *idev;
1935 struct sk_buff *skb;
1936 struct mld_msg *hdr;
1937 const struct in6_addr *snd_addr, *saddr;
1938 struct in6_addr addr_buf;
1939 int hlen = LL_RESERVED_SPACE(dev);
1940 int tlen = dev->needed_tailroom;
1941 int err, len, payload_len, full_len;
1942 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1943 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1944 IPV6_TLV_PADN, 0 };
1945 struct flowi6 fl6;
1946 struct dst_entry *dst;
1947
1948 if (type == ICMPV6_MGM_REDUCTION)
1949 snd_addr = &in6addr_linklocal_allrouters;
1950 else
1951 snd_addr = addr;
1952
1953 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1954 payload_len = len + sizeof(ra);
1955 full_len = sizeof(struct ipv6hdr) + payload_len;
1956
1957 rcu_read_lock();
1958 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1959 IPSTATS_MIB_OUT, full_len);
1960 rcu_read_unlock();
1961
1962 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
1963
1964 if (!skb) {
1965 rcu_read_lock();
1966 IP6_INC_STATS(net, __in6_dev_get(dev),
1967 IPSTATS_MIB_OUTDISCARDS);
1968 rcu_read_unlock();
1969 return;
1970 }
1971 skb->priority = TC_PRIO_CONTROL;
1972 skb_reserve(skb, hlen);
1973
1974 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1975 /* <draft-ietf-magma-mld-source-05.txt>:
1976 * use unspecified address as the source address
1977 * when a valid link-local address is not available.
1978 */
1979 saddr = &in6addr_any;
1980 } else
1981 saddr = &addr_buf;
1982
1983 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1984
1985 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1986
1987 hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1988 memset(hdr, 0, sizeof(struct mld_msg));
1989 hdr->mld_type = type;
1990 hdr->mld_mca = *addr;
1991
1992 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1993 IPPROTO_ICMPV6,
1994 csum_partial(hdr, len, 0));
1995
1996 rcu_read_lock();
1997 idev = __in6_dev_get(skb->dev);
1998
1999 icmpv6_flow_init(sk, &fl6, type,
2000 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2001 skb->dev->ifindex);
2002 dst = icmp6_dst_alloc(skb->dev, &fl6);
2003 if (IS_ERR(dst)) {
2004 err = PTR_ERR(dst);
2005 goto err_out;
2006 }
2007
2008 skb_dst_set(skb, dst);
2009 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2010 net, sk, skb, NULL, skb->dev,
2011 dst_output);
2012out:
2013 if (!err) {
2014 ICMP6MSGOUT_INC_STATS(net, idev, type);
2015 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2016 } else
2017 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2018
2019 rcu_read_unlock();
2020 return;
2021
2022err_out:
2023 kfree_skb(skb);
2024 goto out;
2025}
2026
2027static void mld_send_initial_cr(struct inet6_dev *idev)
2028{
2029 struct sk_buff *skb;
2030 struct ifmcaddr6 *pmc;
2031 int type;
2032
2033 if (mld_in_v1_mode(idev))
2034 return;
2035
2036 skb = NULL;
2037 read_lock_bh(&idev->lock);
2038 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2039 spin_lock_bh(&pmc->mca_lock);
2040 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2041 type = MLD2_CHANGE_TO_EXCLUDE;
2042 else
2043 type = MLD2_CHANGE_TO_INCLUDE;
2044 skb = add_grec(skb, pmc, type, 0, 0, 1);
2045 spin_unlock_bh(&pmc->mca_lock);
2046 }
2047 read_unlock_bh(&idev->lock);
2048 if (skb)
2049 mld_sendpack(skb);
2050}
2051
2052void ipv6_mc_dad_complete(struct inet6_dev *idev)
2053{
2054 idev->mc_dad_count = idev->mc_qrv;
2055 if (idev->mc_dad_count) {
2056 mld_send_initial_cr(idev);
2057 idev->mc_dad_count--;
2058 if (idev->mc_dad_count)
2059 mld_dad_start_timer(idev, idev->mc_maxdelay);
2060 }
2061}
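
/* Editorial note (added): mc_dad_count starts at the robustness variable,
 * so with the default of 2 the unsolicited report triggered by DAD
 * completion is sent once here and retransmitted once more by
 * mld_dad_timer_expire() after mc_maxdelay, i.e. QRV times in total.
 */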
2062
static void mld_dad_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
2066
2067 mld_send_initial_cr(idev);
2068 if (idev->mc_dad_count) {
2069 idev->mc_dad_count--;
2070 if (idev->mc_dad_count)
2071 mld_dad_start_timer(idev, idev->mc_maxdelay);
2072 }
2073 in6_dev_put(idev);
2074}
2075
2076static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2077 const struct in6_addr *psfsrc)
2078{
2079 struct ip6_sf_list *psf, *psf_prev;
2080 int rv = 0;
2081
2082 psf_prev = NULL;
2083 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2084 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2085 break;
2086 psf_prev = psf;
2087 }
2088 if (!psf || psf->sf_count[sfmode] == 0) {
2089 /* source filter not found, or count wrong => bug */
2090 return -ESRCH;
2091 }
2092 psf->sf_count[sfmode]--;
2093 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2094 struct inet6_dev *idev = pmc->idev;
2095
2096 /* no more filters for this source */
2097 if (psf_prev)
2098 psf_prev->sf_next = psf->sf_next;
2099 else
2100 pmc->mca_sources = psf->sf_next;
2101 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2102 !mld_in_v1_mode(idev)) {
2103 psf->sf_crcount = idev->mc_qrv;
2104 psf->sf_next = pmc->mca_tomb;
2105 pmc->mca_tomb = psf;
2106 rv = 1;
2107 } else
2108 kfree(psf);
2109 }
2110 return rv;
2111}
2112
2113static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2114 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2115 int delta)
2116{
2117 struct ifmcaddr6 *pmc;
2118 int changerec = 0;
2119 int i, err;
2120
2121 if (!idev)
2122 return -ENODEV;
2123 read_lock_bh(&idev->lock);
2124 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2125 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2126 break;
2127 }
2128 if (!pmc) {
2129 /* MCA not found?? bug */
2130 read_unlock_bh(&idev->lock);
2131 return -ESRCH;
2132 }
2133 spin_lock_bh(&pmc->mca_lock);
2134 sf_markstate(pmc);
2135 if (!delta) {
2136 if (!pmc->mca_sfcount[sfmode]) {
2137 spin_unlock_bh(&pmc->mca_lock);
2138 read_unlock_bh(&idev->lock);
2139 return -EINVAL;
2140 }
2141 pmc->mca_sfcount[sfmode]--;
2142 }
2143 err = 0;
2144 for (i = 0; i < sfcount; i++) {
2145 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2146
2147 changerec |= rv > 0;
2148 if (!err && rv < 0)
2149 err = rv;
2150 }
2151 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2152 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2153 pmc->mca_sfcount[MCAST_INCLUDE]) {
2154 struct ip6_sf_list *psf;
2155
2156 /* filter mode change */
2157 pmc->mca_sfmode = MCAST_INCLUDE;
2158 pmc->mca_crcount = idev->mc_qrv;
2159 idev->mc_ifc_count = pmc->mca_crcount;
2160 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2161 psf->sf_crcount = 0;
2162 mld_ifc_event(pmc->idev);
2163 } else if (sf_setstate(pmc) || changerec)
2164 mld_ifc_event(pmc->idev);
2165 spin_unlock_bh(&pmc->mca_lock);
2166 read_unlock_bh(&idev->lock);
2167 return err;
2168}
2169
2170/*
2171 * Add multicast single-source filter to the interface list
2172 */
2173static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2174 const struct in6_addr *psfsrc)
2175{
2176 struct ip6_sf_list *psf, *psf_prev;
2177
2178 psf_prev = NULL;
2179 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2180 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2181 break;
2182 psf_prev = psf;
2183 }
2184 if (!psf) {
2185 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2186 if (!psf)
2187 return -ENOBUFS;
2188
2189 psf->sf_addr = *psfsrc;
2190 if (psf_prev) {
2191 psf_prev->sf_next = psf;
2192 } else
2193 pmc->mca_sources = psf;
2194 }
2195 psf->sf_count[sfmode]++;
2196 return 0;
2197}
2198
2199static void sf_markstate(struct ifmcaddr6 *pmc)
2200{
2201 struct ip6_sf_list *psf;
2202 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2203
2204 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2205 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2206 psf->sf_oldin = mca_xcount ==
2207 psf->sf_count[MCAST_EXCLUDE] &&
2208 !psf->sf_count[MCAST_INCLUDE];
2209 } else
2210 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2211}
2212
2213static int sf_setstate(struct ifmcaddr6 *pmc)
2214{
2215 struct ip6_sf_list *psf, *dpsf;
2216 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2217 int qrv = pmc->idev->mc_qrv;
2218 int new_in, rv;
2219
2220 rv = 0;
2221 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2222 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2223 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2224 !psf->sf_count[MCAST_INCLUDE];
2225 } else
2226 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2227 if (new_in) {
2228 if (!psf->sf_oldin) {
2229 struct ip6_sf_list *prev = NULL;
2230
2231 for (dpsf = pmc->mca_tomb; dpsf;
2232 dpsf = dpsf->sf_next) {
2233 if (ipv6_addr_equal(&dpsf->sf_addr,
2234 &psf->sf_addr))
2235 break;
2236 prev = dpsf;
2237 }
2238 if (dpsf) {
2239 if (prev)
2240 prev->sf_next = dpsf->sf_next;
2241 else
2242 pmc->mca_tomb = dpsf->sf_next;
2243 kfree(dpsf);
2244 }
2245 psf->sf_crcount = qrv;
2246 rv++;
2247 }
2248 } else if (psf->sf_oldin) {
2249 psf->sf_crcount = 0;
2250 /*
2251 * add or update "delete" records if an active filter
2252 * is now inactive
2253 */
2254 for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
2255 if (ipv6_addr_equal(&dpsf->sf_addr,
2256 &psf->sf_addr))
2257 break;
2258 if (!dpsf) {
2259 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2260 if (!dpsf)
2261 continue;
2262 *dpsf = *psf;
2263 /* pmc->mca_lock held by callers */
2264 dpsf->sf_next = pmc->mca_tomb;
2265 pmc->mca_tomb = dpsf;
2266 }
2267 dpsf->sf_crcount = qrv;
2268 rv++;
2269 }
2270 }
2271 return rv;
2272}
2273
2274/*
2275 * Add multicast source filter list to the interface list
2276 */
2277static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2278 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2279 int delta)
2280{
2281 struct ifmcaddr6 *pmc;
2282 int isexclude;
2283 int i, err;
2284
2285 if (!idev)
2286 return -ENODEV;
2287 read_lock_bh(&idev->lock);
2288 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2289 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2290 break;
2291 }
2292 if (!pmc) {
2293 /* MCA not found?? bug */
2294 read_unlock_bh(&idev->lock);
2295 return -ESRCH;
2296 }
2297 spin_lock_bh(&pmc->mca_lock);
2298
2299 sf_markstate(pmc);
2300 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2301 if (!delta)
2302 pmc->mca_sfcount[sfmode]++;
2303 err = 0;
2304 for (i = 0; i < sfcount; i++) {
2305 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2306 if (err)
2307 break;
2308 }
2309 if (err) {
2310 int j;
2311
2312 if (!delta)
2313 pmc->mca_sfcount[sfmode]--;
2314 for (j = 0; j < i; j++)
2315 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2316 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2317 struct ip6_sf_list *psf;
2318
2319 /* filter mode change */
2320 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2321 pmc->mca_sfmode = MCAST_EXCLUDE;
2322 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2323 pmc->mca_sfmode = MCAST_INCLUDE;
2324 /* else no filters; keep old mode for reports */
2325
2326 pmc->mca_crcount = idev->mc_qrv;
2327 idev->mc_ifc_count = pmc->mca_crcount;
2328 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2329 psf->sf_crcount = 0;
2330 mld_ifc_event(idev);
2331 } else if (sf_setstate(pmc))
2332 mld_ifc_event(idev);
2333 spin_unlock_bh(&pmc->mca_lock);
2334 read_unlock_bh(&idev->lock);
2335 return err;
2336}
2337
2338static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2339{
2340 struct ip6_sf_list *psf, *nextpsf;
2341
2342 for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
2343 nextpsf = psf->sf_next;
2344 kfree(psf);
2345 }
2346 pmc->mca_tomb = NULL;
2347 for (psf = pmc->mca_sources; psf; psf = nextpsf) {
2348 nextpsf = psf->sf_next;
2349 kfree(psf);
2350 }
2351 pmc->mca_sources = NULL;
2352 pmc->mca_sfmode = MCAST_EXCLUDE;
2353 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2354 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2355}
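
/* Editorial note (added): the state restored above, EXCLUDE mode with an
 * empty source list and mca_sfcount[MCAST_EXCLUDE] == 1, is the MLDv2
 * EXCLUDE({}) filter, i.e. "accept traffic from any source", which is
 * what a plain join without source filters amounts to.
 */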
2356
2357
2358static void igmp6_join_group(struct ifmcaddr6 *ma)
2359{
2360 unsigned long delay;
2361
2362 if (ma->mca_flags & MAF_NOREPORT)
2363 return;
2364
2365 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2366
2367 delay = prandom_u32() % unsolicited_report_interval(ma->idev);
2368
2369 spin_lock_bh(&ma->mca_lock);
2370 if (del_timer(&ma->mca_timer)) {
2371 atomic_dec(&ma->mca_refcnt);
2372 delay = ma->mca_timer.expires - jiffies;
2373 }
2374
2375 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2376 atomic_inc(&ma->mca_refcnt);
2377 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2378 spin_unlock_bh(&ma->mca_lock);
2379}
2380
2381static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2382 struct inet6_dev *idev)
2383{
2384 int err;
2385
2386 /* callers have the socket lock and rtnl lock
2387 * so no other readers or writers of iml or its sflist
2388 */
2389 if (!iml->sflist) {
2390 /* any-source empty exclude case */
2391 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2392 }
2393 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2394 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2395 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2396 iml->sflist = NULL;
2397 return err;
2398}
2399
2400static void igmp6_leave_group(struct ifmcaddr6 *ma)
2401{
2402 if (mld_in_v1_mode(ma->idev)) {
2403 if (ma->mca_flags & MAF_LAST_REPORTER)
2404 igmp6_send(&ma->mca_addr, ma->idev->dev,
2405 ICMPV6_MGM_REDUCTION);
2406 } else {
2407 mld_add_delrec(ma->idev, ma);
2408 mld_ifc_event(ma->idev);
2409 }
2410}
2411
static void mld_gq_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
2415
2416 idev->mc_gq_running = 0;
2417 mld_send_report(idev, NULL);
2418 in6_dev_put(idev);
2419}
2420
static void mld_ifc_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
2424
2425 mld_send_cr(idev);
2426 if (idev->mc_ifc_count) {
2427 idev->mc_ifc_count--;
2428 if (idev->mc_ifc_count)
2429 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2430 }
2431 in6_dev_put(idev);
2432}
2433
2434static void mld_ifc_event(struct inet6_dev *idev)
2435{
2436 if (mld_in_v1_mode(idev))
2437 return;
2438 idev->mc_ifc_count = idev->mc_qrv;
2439 mld_ifc_start_timer(idev, 1);
2440}
2441
2442
static void igmp6_timer_handler(struct timer_list *t)
{
	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
2446
2447 if (mld_in_v1_mode(ma->idev))
2448 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2449 else
2450 mld_send_report(ma->idev, ma);
2451
2452 spin_lock(&ma->mca_lock);
2453 ma->mca_flags |= MAF_LAST_REPORTER;
2454 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2455 spin_unlock(&ma->mca_lock);
2456 ma_put(ma);
2457}
2458
2459/* Device changing type */
2460
2461void ipv6_mc_unmap(struct inet6_dev *idev)
2462{
2463 struct ifmcaddr6 *i;
2464
	/* Withdraw the multicast list while the device changes type */
2466
2467 read_lock_bh(&idev->lock);
2468 for (i = idev->mc_list; i; i = i->next)
2469 igmp6_group_dropped(i);
2470 read_unlock_bh(&idev->lock);
2471}
2472
2473void ipv6_mc_remap(struct inet6_dev *idev)
2474{
2475 ipv6_mc_up(idev);
2476}
2477
2478/* Device going down */
2479
2480void ipv6_mc_down(struct inet6_dev *idev)
2481{
2482 struct ifmcaddr6 *i;
2483
2484 /* Withdraw multicast list */
2485
2486 read_lock_bh(&idev->lock);
2487 mld_ifc_stop_timer(idev);
2488 mld_gq_stop_timer(idev);
2489 mld_dad_stop_timer(idev);
2490
2491 for (i = idev->mc_list; i; i = i->next)
2492 igmp6_group_dropped(i);
2493 read_unlock_bh(&idev->lock);
2494
2495 mld_clear_delrec(idev);
2496}
2497
2498static void ipv6_mc_reset(struct inet6_dev *idev)
2499{
2500 idev->mc_qrv = sysctl_mld_qrv;
2501 idev->mc_qi = MLD_QI_DEFAULT;
2502 idev->mc_qri = MLD_QRI_DEFAULT;
2503 idev->mc_v1_seen = 0;
2504 idev->mc_maxdelay = unsolicited_report_interval(idev);
2505}
2506
2507/* Device going up */
2508
2509void ipv6_mc_up(struct inet6_dev *idev)
2510{
2511 struct ifmcaddr6 *i;
2512
2513 /* Install multicast list, except for all-nodes (already installed) */
2514
2515 read_lock_bh(&idev->lock);
2516 ipv6_mc_reset(idev);
2517 for (i = idev->mc_list; i; i = i->next)
2518 igmp6_group_added(i);
2519 read_unlock_bh(&idev->lock);
2520}
2521
2522/* IPv6 device initialization. */
2523
2524void ipv6_mc_init_dev(struct inet6_dev *idev)
2525{
2526 write_lock_bh(&idev->lock);
2527 spin_lock_init(&idev->mc_lock);
2528 idev->mc_gq_running = 0;
	timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
	timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
2537 ipv6_mc_reset(idev);
2538 write_unlock_bh(&idev->lock);
2539}
2540
2541/*
2542 * Device is about to be destroyed: clean up.
2543 */
2544
2545void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2546{
2547 struct ifmcaddr6 *i;
2548
2549 /* Deactivate timers */
2550 ipv6_mc_down(idev);
2551
2552 /* Delete all-nodes address. */
2553 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2554 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2555 * fail.
2556 */
2557 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2558
2559 if (idev->cnf.forwarding)
2560 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2561
2562 write_lock_bh(&idev->lock);
2563 while ((i = idev->mc_list) != NULL) {
2564 idev->mc_list = i->next;
2565 write_unlock_bh(&idev->lock);
2566
2567 igmp6_group_dropped(i);
2568 ma_put(i);
2569
2570 write_lock_bh(&idev->lock);
2571 }
2572 write_unlock_bh(&idev->lock);
2573}
2574
2575#ifdef CONFIG_PROC_FS
2576struct igmp6_mc_iter_state {
2577 struct seq_net_private p;
2578 struct net_device *dev;
2579 struct inet6_dev *idev;
2580};
2581
2582#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2583
2584static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2585{
2586 struct ifmcaddr6 *im = NULL;
2587 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2588 struct net *net = seq_file_net(seq);
2589
2590 state->idev = NULL;
2591 for_each_netdev_rcu(net, state->dev) {
2592 struct inet6_dev *idev;
2593 idev = __in6_dev_get(state->dev);
2594 if (!idev)
2595 continue;
2596 read_lock_bh(&idev->lock);
2597 im = idev->mc_list;
2598 if (im) {
2599 state->idev = idev;
2600 break;
2601 }
2602 read_unlock_bh(&idev->lock);
2603 }
2604 return im;
2605}
2606
2607static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2608{
2609 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2610
2611 im = im->next;
2612 while (!im) {
2613 if (likely(state->idev))
2614 read_unlock_bh(&state->idev->lock);
2615
2616 state->dev = next_net_device_rcu(state->dev);
2617 if (!state->dev) {
2618 state->idev = NULL;
2619 break;
2620 }
2621 state->idev = __in6_dev_get(state->dev);
2622 if (!state->idev)
2623 continue;
2624 read_lock_bh(&state->idev->lock);
2625 im = state->idev->mc_list;
2626 }
2627 return im;
2628}
2629
2630static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2631{
2632 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2633 if (im)
2634 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2635 --pos;
2636 return pos ? NULL : im;
2637}
2638
2639static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2640 __acquires(RCU)
2641{
2642 rcu_read_lock();
2643 return igmp6_mc_get_idx(seq, *pos);
2644}
2645
2646static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2647{
2648 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2649
2650 ++*pos;
2651 return im;
2652}
2653
2654static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2655 __releases(RCU)
2656{
2657 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2658
2659 if (likely(state->idev)) {
2660 read_unlock_bh(&state->idev->lock);
2661 state->idev = NULL;
2662 }
2663 state->dev = NULL;
2664 rcu_read_unlock();
2665}
2666
2667static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2668{
2669 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2670 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2671
2672 seq_printf(seq,
2673 "%-4d %-15s %pi6 %5d %08X %ld\n",
2674 state->dev->ifindex, state->dev->name,
2675 &im->mca_addr,
2676 im->mca_users, im->mca_flags,
2677 (im->mca_flags&MAF_TIMER_RUNNING) ?
2678 jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2679 return 0;
2680}
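
/* Editorial note (added): each /proc/net/igmp6 line printed above shows,
 * in order, the interface index, the device name, the group address as a
 * run of hex digits, the number of local users (mca_users), the MAF_*
 * flags in hex, and the remaining timer time in clock_t units (0 unless
 * MAF_TIMER_RUNNING is set).
 */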
2681
2682static const struct seq_operations igmp6_mc_seq_ops = {
2683 .start = igmp6_mc_seq_start,
2684 .next = igmp6_mc_seq_next,
2685 .stop = igmp6_mc_seq_stop,
2686 .show = igmp6_mc_seq_show,
2687};
2688
2689static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2690{
2691 return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2692 sizeof(struct igmp6_mc_iter_state));
2693}
2694
2695static const struct file_operations igmp6_mc_seq_fops = {
2696 .owner = THIS_MODULE,
2697 .open = igmp6_mc_seq_open,
2698 .read = seq_read,
2699 .llseek = seq_lseek,
2700 .release = seq_release_net,
2701};
2702
2703struct igmp6_mcf_iter_state {
2704 struct seq_net_private p;
2705 struct net_device *dev;
2706 struct inet6_dev *idev;
2707 struct ifmcaddr6 *im;
2708};
2709
2710#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2711
2712static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2713{
2714 struct ip6_sf_list *psf = NULL;
2715 struct ifmcaddr6 *im = NULL;
2716 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2717 struct net *net = seq_file_net(seq);
2718
2719 state->idev = NULL;
2720 state->im = NULL;
2721 for_each_netdev_rcu(net, state->dev) {
2722 struct inet6_dev *idev;
2723 idev = __in6_dev_get(state->dev);
2724 if (unlikely(idev == NULL))
2725 continue;
2726 read_lock_bh(&idev->lock);
2727 im = idev->mc_list;
2728 if (likely(im)) {
2729 spin_lock_bh(&im->mca_lock);
2730 psf = im->mca_sources;
2731 if (likely(psf)) {
2732 state->im = im;
2733 state->idev = idev;
2734 break;
2735 }
2736 spin_unlock_bh(&im->mca_lock);
2737 }
2738 read_unlock_bh(&idev->lock);
2739 }
2740 return psf;
2741}
2742
2743static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2744{
2745 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2746
2747 psf = psf->sf_next;
2748 while (!psf) {
2749 spin_unlock_bh(&state->im->mca_lock);
2750 state->im = state->im->next;
2751 while (!state->im) {
2752 if (likely(state->idev))
2753 read_unlock_bh(&state->idev->lock);
2754
2755 state->dev = next_net_device_rcu(state->dev);
2756 if (!state->dev) {
2757 state->idev = NULL;
2758 goto out;
2759 }
2760 state->idev = __in6_dev_get(state->dev);
2761 if (!state->idev)
2762 continue;
2763 read_lock_bh(&state->idev->lock);
2764 state->im = state->idev->mc_list;
2765 }
2766 if (!state->im)
2767 break;
2768 spin_lock_bh(&state->im->mca_lock);
2769 psf = state->im->mca_sources;
2770 }
2771out:
2772 return psf;
2773}
2774
2775static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2776{
2777 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2778 if (psf)
2779 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2780 --pos;
2781 return pos ? NULL : psf;
2782}
2783
2784static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2785 __acquires(RCU)
2786{
2787 rcu_read_lock();
2788 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2789}
2790
2791static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2792{
2793 struct ip6_sf_list *psf;
2794 if (v == SEQ_START_TOKEN)
2795 psf = igmp6_mcf_get_first(seq);
2796 else
2797 psf = igmp6_mcf_get_next(seq, v);
2798 ++*pos;
2799 return psf;
2800}
2801
2802static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2803 __releases(RCU)
2804{
2805 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2806 if (likely(state->im)) {
2807 spin_unlock_bh(&state->im->mca_lock);
2808 state->im = NULL;
2809 }
2810 if (likely(state->idev)) {
2811 read_unlock_bh(&state->idev->lock);
2812 state->idev = NULL;
2813 }
2814 state->dev = NULL;
2815 rcu_read_unlock();
2816}
2817
2818static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2819{
2820 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2821 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2822
2823 if (v == SEQ_START_TOKEN) {
2824 seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
2825 } else {
2826 seq_printf(seq,
2827 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2828 state->dev->ifindex, state->dev->name,
2829 &state->im->mca_addr,
2830 &psf->sf_addr,
2831 psf->sf_count[MCAST_INCLUDE],
2832 psf->sf_count[MCAST_EXCLUDE]);
2833 }
2834 return 0;
2835}
2836
2837static const struct seq_operations igmp6_mcf_seq_ops = {
2838 .start = igmp6_mcf_seq_start,
2839 .next = igmp6_mcf_seq_next,
2840 .stop = igmp6_mcf_seq_stop,
2841 .show = igmp6_mcf_seq_show,
2842};
2843
2844static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2845{
2846 return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2847 sizeof(struct igmp6_mcf_iter_state));
2848}
2849
2850static const struct file_operations igmp6_mcf_seq_fops = {
2851 .owner = THIS_MODULE,
2852 .open = igmp6_mcf_seq_open,
2853 .read = seq_read,
2854 .llseek = seq_lseek,
2855 .release = seq_release_net,
2856};
2857
2858static int __net_init igmp6_proc_init(struct net *net)
2859{
2860 int err;
2861
2862 err = -ENOMEM;
2863 if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
2864 goto out;
2865 if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
2866 &igmp6_mcf_seq_fops))
2867 goto out_proc_net_igmp6;
2868
2869 err = 0;
2870out:
2871 return err;
2872
2873out_proc_net_igmp6:
2874 remove_proc_entry("igmp6", net->proc_net);
2875 goto out;
2876}
2877
2878static void __net_exit igmp6_proc_exit(struct net *net)
2879{
2880 remove_proc_entry("mcfilter6", net->proc_net);
2881 remove_proc_entry("igmp6", net->proc_net);
2882}
2883#else
2884static inline int igmp6_proc_init(struct net *net)
2885{
2886 return 0;
2887}
2888static inline void igmp6_proc_exit(struct net *net)
2889{
2890}
2891#endif
2892
2893static int __net_init igmp6_net_init(struct net *net)
2894{
2895 int err;
2896
2897 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2898 SOCK_RAW, IPPROTO_ICMPV6, net);
2899 if (err < 0) {
2900 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2901 err);
2902 goto out;
2903 }
2904
2905 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2906
2907 err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
2908 SOCK_RAW, IPPROTO_ICMPV6, net);
2909 if (err < 0) {
2910 pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
2911 err);
2912 goto out_sock_create;
2913 }
2914
2915 err = igmp6_proc_init(net);
2916 if (err)
2917 goto out_sock_create_autojoin;
2918
2919 return 0;
2920
2921out_sock_create_autojoin:
2922 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2923out_sock_create:
2924 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2925out:
2926 return err;
2927}
2928
2929static void __net_exit igmp6_net_exit(struct net *net)
2930{
2931 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2932 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2933 igmp6_proc_exit(net);
2934}
2935
2936static struct pernet_operations igmp6_net_ops = {
2937 .init = igmp6_net_init,
2938 .exit = igmp6_net_exit,
2939};
2940
2941int __init igmp6_init(void)
2942{
2943 return register_pernet_subsys(&igmp6_net_ops);
2944}
2945
2946void igmp6_cleanup(void)
2947{
2948 unregister_pernet_subsys(&igmp6_net_ops);
2949}