1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Multicast support for IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10 */
11
12/* Changes:
13 *
14 * yoshfuji : fix format of router-alert option
15 * YOSHIFUJI Hideaki @USAGI:
16 * Fixed source address for MLD message based on
17 * <draft-ietf-magma-mld-source-05.txt>.
18 * YOSHIFUJI Hideaki @USAGI:
19 * - Ignore Queries for invalid addresses.
20 * - MLD for link-local addresses.
21 * David L Stevens <dlstevens@us.ibm.com>:
22 * - MLDv2 support
23 */
24
25#include <linux/module.h>
26#include <linux/errno.h>
27#include <linux/types.h>
28#include <linux/string.h>
29#include <linux/socket.h>
30#include <linux/sockios.h>
31#include <linux/jiffies.h>
32#include <linux/net.h>
33#include <linux/in.h>
34#include <linux/in6.h>
35#include <linux/netdevice.h>
36#include <linux/if_arp.h>
37#include <linux/route.h>
38#include <linux/init.h>
39#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
41#include <linux/slab.h>
42#include <linux/pkt_sched.h>
43#include <net/mld.h>
44#include <linux/workqueue.h>
45
46#include <linux/netfilter.h>
47#include <linux/netfilter_ipv6.h>
48
49#include <net/net_namespace.h>
50#include <net/sock.h>
51#include <net/snmp.h>
52
53#include <net/ipv6.h>
54#include <net/protocol.h>
55#include <net/if_inet6.h>
56#include <net/ndisc.h>
57#include <net/addrconf.h>
58#include <net/ip6_route.h>
59#include <net/inet_common.h>
60
61#include <net/ip6_checksum.h>
62
/* Ensure that we have struct in6_addr aligned on a 32-bit word. */
64static int __mld2_query_bugs[] __attribute__((__unused__)) = {
65 BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
66 BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
67 BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
68};
69
70static struct workqueue_struct *mld_wq;
71static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
72
73static void igmp6_join_group(struct ifmcaddr6 *ma);
74static void igmp6_leave_group(struct ifmcaddr6 *ma);
75static void mld_mca_work(struct work_struct *work);
76
77static void mld_ifc_event(struct inet6_dev *idev);
78static bool mld_in_v1_mode(const struct inet6_dev *idev);
79static int sf_setstate(struct ifmcaddr6 *pmc);
80static void sf_markstate(struct ifmcaddr6 *pmc);
81static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
82static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
83 int sfmode, int sfcount, const struct in6_addr *psfsrc,
84 int delta);
85static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
86 int sfmode, int sfcount, const struct in6_addr *psfsrc,
87 int delta);
88static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
89 struct inet6_dev *idev);
90static int __ipv6_dev_mc_inc(struct net_device *dev,
91 const struct in6_addr *addr, unsigned int mode);
92
93#define MLD_QRV_DEFAULT 2
94/* RFC3810, 9.2. Query Interval */
95#define MLD_QI_DEFAULT (125 * HZ)
96/* RFC3810, 9.3. Query Response Interval */
97#define MLD_QRI_DEFAULT (10 * HZ)
98
99/* RFC3810, 8.1 Query Version Distinctions */
100#define MLD_V1_QUERY_LEN 24
101#define MLD_V2_QUERY_LEN_MIN 28
102
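/* Default cap on per-socket MLD source filters; run-time adjustable
 * via the mld_max_msf sysctl.
 */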
103#define IPV6_MLD_MAX_MSF 64
104
105int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
106int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
107
108/*
109 * socket join on multicast group
110 */
111#define mc_dereference(e, idev) \
112 rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
113
114#define sock_dereference(e, sk) \
115 rcu_dereference_protected(e, lockdep_sock_is_held(sk))
116
117#define for_each_pmc_socklock(np, sk, pmc) \
118 for (pmc = sock_dereference((np)->ipv6_mc_list, sk); \
119 pmc; \
120 pmc = sock_dereference(pmc->next, sk))
121
122#define for_each_pmc_rcu(np, pmc) \
123 for (pmc = rcu_dereference((np)->ipv6_mc_list); \
124 pmc; \
125 pmc = rcu_dereference(pmc->next))
126
127#define for_each_psf_mclock(mc, psf) \
128 for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
129 psf; \
130 psf = mc_dereference(psf->sf_next, mc->idev))
131
132#define for_each_psf_rcu(mc, psf) \
133 for (psf = rcu_dereference((mc)->mca_sources); \
134 psf; \
135 psf = rcu_dereference(psf->sf_next))
136
137#define for_each_psf_tomb(mc, psf) \
138 for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
139 psf; \
140 psf = mc_dereference(psf->sf_next, mc->idev))
141
142#define for_each_mc_mclock(idev, mc) \
143 for (mc = mc_dereference((idev)->mc_list, idev); \
144 mc; \
145 mc = mc_dereference(mc->next, idev))
146
147#define for_each_mc_rcu(idev, mc) \
148 for (mc = rcu_dereference((idev)->mc_list); \
149 mc; \
150 mc = rcu_dereference(mc->next))
151
152#define for_each_mc_tomb(idev, mc) \
153 for (mc = mc_dereference((idev)->mc_tomb, idev); \
154 mc; \
155 mc = mc_dereference(mc->next, idev))
156
157static int unsolicited_report_interval(struct inet6_dev *idev)
158{
159 int iv;
160
161 if (mld_in_v1_mode(idev))
162 iv = idev->cnf.mldv1_unsolicited_report_interval;
163 else
164 iv = idev->cnf.mldv2_unsolicited_report_interval;
165
166 return iv > 0 ? iv : 1;
167}
168
169static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
170 const struct in6_addr *addr, unsigned int mode)
171{
172 struct net_device *dev = NULL;
173 struct ipv6_mc_socklist *mc_lst;
174 struct ipv6_pinfo *np = inet6_sk(sk);
175 struct net *net = sock_net(sk);
176 int err;
177
178 ASSERT_RTNL();
179
180 if (!ipv6_addr_is_multicast(addr))
181 return -EINVAL;
182
183 for_each_pmc_socklock(np, sk, mc_lst) {
184 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
185 ipv6_addr_equal(&mc_lst->addr, addr))
186 return -EADDRINUSE;
187 }
188
189 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
190
191 if (!mc_lst)
192 return -ENOMEM;
193
194 mc_lst->next = NULL;
195 mc_lst->addr = *addr;
196
197 if (ifindex == 0) {
198 struct rt6_info *rt;
199 rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
200 if (rt) {
201 dev = rt->dst.dev;
202 ip6_rt_put(rt);
203 }
204 } else
205 dev = __dev_get_by_index(net, ifindex);
206
207 if (!dev) {
208 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
209 return -ENODEV;
210 }
211
212 mc_lst->ifindex = dev->ifindex;
213 mc_lst->sfmode = mode;
214 RCU_INIT_POINTER(mc_lst->sflist, NULL);
215
216 /*
217 * now add/increase the group membership on the device
218 */
219
220 err = __ipv6_dev_mc_inc(dev, addr, mode);
221
222 if (err) {
223 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
224 return err;
225 }
226
227 mc_lst->next = np->ipv6_mc_list;
228 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
229
230 return 0;
231}
232
233int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
234{
235 return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
236}
237EXPORT_SYMBOL(ipv6_sock_mc_join);
238
239int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
240 const struct in6_addr *addr, unsigned int mode)
241{
242 return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
243}
244
245/*
246 * socket leave on multicast group
247 */
248int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
249{
250 struct ipv6_pinfo *np = inet6_sk(sk);
251 struct ipv6_mc_socklist *mc_lst;
252 struct ipv6_mc_socklist __rcu **lnk;
253 struct net *net = sock_net(sk);
254
255 ASSERT_RTNL();
256
257 if (!ipv6_addr_is_multicast(addr))
258 return -EINVAL;
259
260 for (lnk = &np->ipv6_mc_list;
261 (mc_lst = sock_dereference(*lnk, sk)) != NULL;
262 lnk = &mc_lst->next) {
263 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
264 ipv6_addr_equal(&mc_lst->addr, addr)) {
265 struct net_device *dev;
266
267 *lnk = mc_lst->next;
268
269 dev = __dev_get_by_index(net, mc_lst->ifindex);
270 if (dev) {
271 struct inet6_dev *idev = __in6_dev_get(dev);
272
273 ip6_mc_leave_src(sk, mc_lst, idev);
274 if (idev)
275 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
276 } else {
277 ip6_mc_leave_src(sk, mc_lst, NULL);
278 }
279
280 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
281 kfree_rcu(mc_lst, rcu);
282 return 0;
283 }
284 }
285
286 return -EADDRNOTAVAIL;
287}
288EXPORT_SYMBOL(ipv6_sock_mc_drop);
289
290static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
291 const struct in6_addr *group,
292 int ifindex)
293{
294 struct net_device *dev = NULL;
295 struct inet6_dev *idev = NULL;
296
297 if (ifindex == 0) {
298 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
299
300 if (rt) {
301 dev = rt->dst.dev;
302 ip6_rt_put(rt);
303 }
304 } else {
305 dev = __dev_get_by_index(net, ifindex);
306 }
307
308 if (!dev)
309 return NULL;
310 idev = __in6_dev_get(dev);
311 if (!idev)
312 return NULL;
313 if (idev->dead)
314 return NULL;
315 return idev;
316}
317
318void __ipv6_sock_mc_close(struct sock *sk)
319{
320 struct ipv6_pinfo *np = inet6_sk(sk);
321 struct ipv6_mc_socklist *mc_lst;
322 struct net *net = sock_net(sk);
323
324 ASSERT_RTNL();
325
326 while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
327 struct net_device *dev;
328
329 np->ipv6_mc_list = mc_lst->next;
330
331 dev = __dev_get_by_index(net, mc_lst->ifindex);
332 if (dev) {
333 struct inet6_dev *idev = __in6_dev_get(dev);
334
335 ip6_mc_leave_src(sk, mc_lst, idev);
336 if (idev)
337 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
338 } else {
339 ip6_mc_leave_src(sk, mc_lst, NULL);
340 }
341
342 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
343 kfree_rcu(mc_lst, rcu);
344 }
345}
346
347void ipv6_sock_mc_close(struct sock *sk)
348{
349 struct ipv6_pinfo *np = inet6_sk(sk);
350
351 if (!rcu_access_pointer(np->ipv6_mc_list))
352 return;
353
354 rtnl_lock();
355 lock_sock(sk);
356 __ipv6_sock_mc_close(sk);
357 release_sock(sk);
358 rtnl_unlock();
359}
360
361int ip6_mc_source(int add, int omode, struct sock *sk,
362 struct group_source_req *pgsr)
363{
364 struct in6_addr *source, *group;
365 struct ipv6_mc_socklist *pmc;
366 struct inet6_dev *idev;
367 struct ipv6_pinfo *inet6 = inet6_sk(sk);
368 struct ip6_sf_socklist *psl;
369 struct net *net = sock_net(sk);
370 int i, j, rv;
371 int leavegroup = 0;
372 int err;
373
374 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
375 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
376
377 if (!ipv6_addr_is_multicast(group))
378 return -EINVAL;
379
380 idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
381 if (!idev)
382 return -ENODEV;
383
384 err = -EADDRNOTAVAIL;
385
386 mutex_lock(&idev->mc_lock);
387 for_each_pmc_socklock(inet6, sk, pmc) {
388 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
389 continue;
390 if (ipv6_addr_equal(&pmc->addr, group))
391 break;
392 }
393 if (!pmc) { /* must have a prior join */
394 err = -EINVAL;
395 goto done;
396 }
397 /* if a source filter was set, must be the same mode as before */
398 if (rcu_access_pointer(pmc->sflist)) {
399 if (pmc->sfmode != omode) {
400 err = -EINVAL;
401 goto done;
402 }
403 } else if (pmc->sfmode != omode) {
404 /* allow mode switches for empty-set filters */
405 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
406 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
407 pmc->sfmode = omode;
408 }
409
410 psl = sock_dereference(pmc->sflist, sk);
411 if (!add) {
412 if (!psl)
413 goto done; /* err = -EADDRNOTAVAIL */
414 rv = !0;
415 for (i = 0; i < psl->sl_count; i++) {
416 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
417 if (rv == 0)
418 break;
419 }
420 if (rv) /* source not found */
421 goto done; /* err = -EADDRNOTAVAIL */
422
423 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
424 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
425 leavegroup = 1;
426 goto done;
427 }
428
429 /* update the interface filter */
430 ip6_mc_del_src(idev, group, omode, 1, source, 1);
431
432 for (j = i+1; j < psl->sl_count; j++)
433 psl->sl_addr[j-1] = psl->sl_addr[j];
434 psl->sl_count--;
435 err = 0;
436 goto done;
437 }
438 /* else, add a new source to the filter */
439
440 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
441 err = -ENOBUFS;
442 goto done;
443 }
444 if (!psl || psl->sl_count == psl->sl_max) {
445 struct ip6_sf_socklist *newpsl;
446 int count = IP6_SFBLOCK;
447
448 if (psl)
449 count += psl->sl_max;
450 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_KERNEL);
451 if (!newpsl) {
452 err = -ENOBUFS;
453 goto done;
454 }
455 newpsl->sl_max = count;
456 newpsl->sl_count = count - IP6_SFBLOCK;
457 if (psl) {
458 for (i = 0; i < psl->sl_count; i++)
459 newpsl->sl_addr[i] = psl->sl_addr[i];
460 atomic_sub(IP6_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
461 kfree_rcu(psl, rcu);
462 }
463 psl = newpsl;
464 rcu_assign_pointer(pmc->sflist, psl);
465 }
466 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
467 for (i = 0; i < psl->sl_count; i++) {
468 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* source is already in the filter list */
470 goto done;
471 }
472 for (j = psl->sl_count-1; j >= i; j--)
473 psl->sl_addr[j+1] = psl->sl_addr[j];
474 psl->sl_addr[i] = *source;
475 psl->sl_count++;
476 err = 0;
477 /* update the interface list */
478 ip6_mc_add_src(idev, group, omode, 1, source, 1);
479done:
480 mutex_unlock(&idev->mc_lock);
481 if (leavegroup)
482 err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
483 return err;
484}
485
486int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
487 struct sockaddr_storage *list)
488{
489 const struct in6_addr *group;
490 struct ipv6_mc_socklist *pmc;
491 struct inet6_dev *idev;
492 struct ipv6_pinfo *inet6 = inet6_sk(sk);
493 struct ip6_sf_socklist *newpsl, *psl;
494 struct net *net = sock_net(sk);
495 int leavegroup = 0;
496 int i, err;
497
498 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
499
500 if (!ipv6_addr_is_multicast(group))
501 return -EINVAL;
502 if (gsf->gf_fmode != MCAST_INCLUDE &&
503 gsf->gf_fmode != MCAST_EXCLUDE)
504 return -EINVAL;
505
506 idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
507 if (!idev)
508 return -ENODEV;
509
510 err = 0;
511
512 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
513 leavegroup = 1;
514 goto done;
515 }
516
517 for_each_pmc_socklock(inet6, sk, pmc) {
518 if (pmc->ifindex != gsf->gf_interface)
519 continue;
520 if (ipv6_addr_equal(&pmc->addr, group))
521 break;
522 }
523 if (!pmc) { /* must have a prior join */
524 err = -EINVAL;
525 goto done;
526 }
527 if (gsf->gf_numsrc) {
528 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
529 GFP_KERNEL);
530 if (!newpsl) {
531 err = -ENOBUFS;
532 goto done;
533 }
534 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
535 for (i = 0; i < newpsl->sl_count; ++i, ++list) {
536 struct sockaddr_in6 *psin6;
537
538 psin6 = (struct sockaddr_in6 *)list;
539 newpsl->sl_addr[i] = psin6->sin6_addr;
540 }
541 mutex_lock(&idev->mc_lock);
542 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
543 newpsl->sl_count, newpsl->sl_addr, 0);
544 if (err) {
545 mutex_unlock(&idev->mc_lock);
546 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
547 goto done;
548 }
549 mutex_unlock(&idev->mc_lock);
550 } else {
551 newpsl = NULL;
552 mutex_lock(&idev->mc_lock);
553 ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
554 mutex_unlock(&idev->mc_lock);
555 }
556
557 mutex_lock(&idev->mc_lock);
558 psl = sock_dereference(pmc->sflist, sk);
559 if (psl) {
560 ip6_mc_del_src(idev, group, pmc->sfmode,
561 psl->sl_count, psl->sl_addr, 0);
562 atomic_sub(IP6_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
563 kfree_rcu(psl, rcu);
564 } else {
565 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
566 }
567 mutex_unlock(&idev->mc_lock);
568 rcu_assign_pointer(pmc->sflist, newpsl);
569 pmc->sfmode = gsf->gf_fmode;
570 err = 0;
571done:
572 if (leavegroup)
573 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
574 return err;
575}
576
577int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
578 struct sockaddr_storage __user *p)
579{
580 struct ipv6_pinfo *inet6 = inet6_sk(sk);
581 const struct in6_addr *group;
582 struct ipv6_mc_socklist *pmc;
583 struct ip6_sf_socklist *psl;
584 int i, count, copycount;
585
586 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
587
588 if (!ipv6_addr_is_multicast(group))
589 return -EINVAL;
590
591 /* changes to the ipv6_mc_list require the socket lock and
592 * rtnl lock. We have the socket lock, so reading the list is safe.
593 */
594
595 for_each_pmc_socklock(inet6, sk, pmc) {
596 if (pmc->ifindex != gsf->gf_interface)
597 continue;
598 if (ipv6_addr_equal(group, &pmc->addr))
599 break;
600 }
601 if (!pmc) /* must have a prior join */
602 return -EADDRNOTAVAIL;
603
604 gsf->gf_fmode = pmc->sfmode;
605 psl = sock_dereference(pmc->sflist, sk);
606 count = psl ? psl->sl_count : 0;
607
608 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
609 gsf->gf_numsrc = count;
610
611 for (i = 0; i < copycount; i++, p++) {
612 struct sockaddr_in6 *psin6;
613 struct sockaddr_storage ss;
614
615 psin6 = (struct sockaddr_in6 *)&ss;
616 memset(&ss, 0, sizeof(ss));
617 psin6->sin6_family = AF_INET6;
618 psin6->sin6_addr = psl->sl_addr[i];
619 if (copy_to_user(p, &ss, sizeof(ss)))
620 return -EFAULT;
621 }
622 return 0;
623}
624
625bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
626 const struct in6_addr *src_addr)
627{
628 struct ipv6_pinfo *np = inet6_sk(sk);
629 struct ipv6_mc_socklist *mc;
630 struct ip6_sf_socklist *psl;
631 bool rv = true;
632
633 rcu_read_lock();
634 for_each_pmc_rcu(np, mc) {
635 if (ipv6_addr_equal(&mc->addr, mc_addr))
636 break;
637 }
638 if (!mc) {
639 rcu_read_unlock();
640 return np->mc_all;
641 }
642 psl = rcu_dereference(mc->sflist);
643 if (!psl) {
644 rv = mc->sfmode == MCAST_EXCLUDE;
645 } else {
646 int i;
647
648 for (i = 0; i < psl->sl_count; i++) {
649 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
650 break;
651 }
652 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
653 rv = false;
654 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
655 rv = false;
656 }
657 rcu_read_unlock();
658
659 return rv;
660}
661
662/* called with mc_lock */
663static void igmp6_group_added(struct ifmcaddr6 *mc)
664{
665 struct net_device *dev = mc->idev->dev;
666 char buf[MAX_ADDR_LEN];
667
668 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
669 IPV6_ADDR_SCOPE_LINKLOCAL)
670 return;
671
672 if (!(mc->mca_flags&MAF_LOADED)) {
673 mc->mca_flags |= MAF_LOADED;
674 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
675 dev_mc_add(dev, buf);
676 }
677
678 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
679 return;
680
681 if (mld_in_v1_mode(mc->idev)) {
682 igmp6_join_group(mc);
683 return;
684 }
685 /* else v2 */
686
	/* Based on RFC3810 6.1, for a newly added INCLUDE-mode (SSM)
	 * group we should not send a filter-mode change record, as the
	 * transition is from IN() to IN(A).
	 */
691 if (mc->mca_sfmode == MCAST_EXCLUDE)
692 mc->mca_crcount = mc->idev->mc_qrv;
693
694 mld_ifc_event(mc->idev);
695}
696
697/* called with mc_lock */
698static void igmp6_group_dropped(struct ifmcaddr6 *mc)
699{
700 struct net_device *dev = mc->idev->dev;
701 char buf[MAX_ADDR_LEN];
702
703 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
704 IPV6_ADDR_SCOPE_LINKLOCAL)
705 return;
706
707 if (mc->mca_flags&MAF_LOADED) {
708 mc->mca_flags &= ~MAF_LOADED;
709 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
710 dev_mc_del(dev, buf);
711 }
712
713 if (mc->mca_flags & MAF_NOREPORT)
714 return;
715
716 if (!mc->idev->dead)
717 igmp6_leave_group(mc);
718
719 if (cancel_delayed_work(&mc->mca_work))
720 refcount_dec(&mc->mca_refcnt);
721}
722
723/*
724 * deleted ifmcaddr6 manipulation
725 * called with mc_lock
726 */
727static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
728{
729 struct ifmcaddr6 *pmc;
730
731 /* this is an "ifmcaddr6" for convenience; only the fields below
732 * are actually used. In particular, the refcnt and users are not
733 * used for management of the delete list. Using the same structure
734 * for deleted items allows change reports to use common code with
735 * non-deleted or query-response MCA's.
736 */
737 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
738 if (!pmc)
739 return;
740
741 pmc->idev = im->idev;
742 in6_dev_hold(idev);
743 pmc->mca_addr = im->mca_addr;
744 pmc->mca_crcount = idev->mc_qrv;
745 pmc->mca_sfmode = im->mca_sfmode;
746 if (pmc->mca_sfmode == MCAST_INCLUDE) {
747 struct ip6_sf_list *psf;
748
749 rcu_assign_pointer(pmc->mca_tomb,
750 mc_dereference(im->mca_tomb, idev));
751 rcu_assign_pointer(pmc->mca_sources,
752 mc_dereference(im->mca_sources, idev));
753 RCU_INIT_POINTER(im->mca_tomb, NULL);
754 RCU_INIT_POINTER(im->mca_sources, NULL);
755
756 for_each_psf_mclock(pmc, psf)
757 psf->sf_crcount = pmc->mca_crcount;
758 }
759
760 rcu_assign_pointer(pmc->next, idev->mc_tomb);
761 rcu_assign_pointer(idev->mc_tomb, pmc);
762}
763
764/* called with mc_lock */
765static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
766{
767 struct ip6_sf_list *psf, *sources, *tomb;
768 struct in6_addr *pmca = &im->mca_addr;
769 struct ifmcaddr6 *pmc, *pmc_prev;
770
771 pmc_prev = NULL;
772 for_each_mc_tomb(idev, pmc) {
773 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
774 break;
775 pmc_prev = pmc;
776 }
777 if (pmc) {
778 if (pmc_prev)
779 rcu_assign_pointer(pmc_prev->next, pmc->next);
780 else
781 rcu_assign_pointer(idev->mc_tomb, pmc->next);
782 }
783
784 if (pmc) {
785 im->idev = pmc->idev;
786 if (im->mca_sfmode == MCAST_INCLUDE) {
787 tomb = rcu_replace_pointer(im->mca_tomb,
788 mc_dereference(pmc->mca_tomb, pmc->idev),
789 lockdep_is_held(&im->idev->mc_lock));
790 rcu_assign_pointer(pmc->mca_tomb, tomb);
791
792 sources = rcu_replace_pointer(im->mca_sources,
793 mc_dereference(pmc->mca_sources, pmc->idev),
794 lockdep_is_held(&im->idev->mc_lock));
795 rcu_assign_pointer(pmc->mca_sources, sources);
796 for_each_psf_mclock(im, psf)
797 psf->sf_crcount = idev->mc_qrv;
798 } else {
799 im->mca_crcount = idev->mc_qrv;
800 }
801 in6_dev_put(pmc->idev);
802 ip6_mc_clear_src(pmc);
803 kfree_rcu(pmc, rcu);
804 }
805}
806
807/* called with mc_lock */
808static void mld_clear_delrec(struct inet6_dev *idev)
809{
810 struct ifmcaddr6 *pmc, *nextpmc;
811
812 pmc = mc_dereference(idev->mc_tomb, idev);
813 RCU_INIT_POINTER(idev->mc_tomb, NULL);
814
815 for (; pmc; pmc = nextpmc) {
816 nextpmc = mc_dereference(pmc->next, idev);
817 ip6_mc_clear_src(pmc);
818 in6_dev_put(pmc->idev);
819 kfree_rcu(pmc, rcu);
820 }
821
822 /* clear dead sources, too */
823 for_each_mc_mclock(idev, pmc) {
824 struct ip6_sf_list *psf, *psf_next;
825
826 psf = mc_dereference(pmc->mca_tomb, idev);
827 RCU_INIT_POINTER(pmc->mca_tomb, NULL);
828 for (; psf; psf = psf_next) {
829 psf_next = mc_dereference(psf->sf_next, idev);
830 kfree_rcu(psf, rcu);
831 }
832 }
833}
834
835static void mld_clear_query(struct inet6_dev *idev)
836{
837 struct sk_buff *skb;
838
839 spin_lock_bh(&idev->mc_query_lock);
840 while ((skb = __skb_dequeue(&idev->mc_query_queue)))
841 kfree_skb(skb);
842 spin_unlock_bh(&idev->mc_query_lock);
843}
844
845static void mld_clear_report(struct inet6_dev *idev)
846{
847 struct sk_buff *skb;
848
849 spin_lock_bh(&idev->mc_report_lock);
850 while ((skb = __skb_dequeue(&idev->mc_report_queue)))
851 kfree_skb(skb);
852 spin_unlock_bh(&idev->mc_report_lock);
853}
854
855static void mca_get(struct ifmcaddr6 *mc)
856{
857 refcount_inc(&mc->mca_refcnt);
858}
859
860static void ma_put(struct ifmcaddr6 *mc)
861{
862 if (refcount_dec_and_test(&mc->mca_refcnt)) {
863 in6_dev_put(mc->idev);
864 kfree_rcu(mc, rcu);
865 }
866}
867
868/* called with mc_lock */
869static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
870 const struct in6_addr *addr,
871 unsigned int mode)
872{
873 struct ifmcaddr6 *mc;
874
875 mc = kzalloc(sizeof(*mc), GFP_KERNEL);
876 if (!mc)
877 return NULL;
878
879 INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);
880
881 mc->mca_addr = *addr;
882 mc->idev = idev; /* reference taken by caller */
883 mc->mca_users = 1;
884 /* mca_stamp should be updated upon changes */
885 mc->mca_cstamp = mc->mca_tstamp = jiffies;
886 refcount_set(&mc->mca_refcnt, 1);
887
888 mc->mca_sfmode = mode;
889 mc->mca_sfcount[mode] = 1;
890
891 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
892 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
893 mc->mca_flags |= MAF_NOREPORT;
894
895 return mc;
896}
897
898/*
899 * device multicast group inc (add if not found)
900 */
901static int __ipv6_dev_mc_inc(struct net_device *dev,
902 const struct in6_addr *addr, unsigned int mode)
903{
904 struct ifmcaddr6 *mc;
905 struct inet6_dev *idev;
906
907 ASSERT_RTNL();
908
909 /* we need to take a reference on idev */
910 idev = in6_dev_get(dev);
911
912 if (!idev)
913 return -EINVAL;
914
915 if (idev->dead) {
916 in6_dev_put(idev);
917 return -ENODEV;
918 }
919
920 mutex_lock(&idev->mc_lock);
921 for_each_mc_mclock(idev, mc) {
922 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
923 mc->mca_users++;
924 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
925 mutex_unlock(&idev->mc_lock);
926 in6_dev_put(idev);
927 return 0;
928 }
929 }
930
931 mc = mca_alloc(idev, addr, mode);
932 if (!mc) {
933 mutex_unlock(&idev->mc_lock);
934 in6_dev_put(idev);
935 return -ENOMEM;
936 }
937
938 rcu_assign_pointer(mc->next, idev->mc_list);
939 rcu_assign_pointer(idev->mc_list, mc);
940
941 mca_get(mc);
942
943 mld_del_delrec(idev, mc);
944 igmp6_group_added(mc);
945 mutex_unlock(&idev->mc_lock);
946 ma_put(mc);
947 return 0;
948}
949
950int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
951{
952 return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
953}
954EXPORT_SYMBOL(ipv6_dev_mc_inc);
955
956/*
957 * device multicast group del
958 */
959int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
960{
961 struct ifmcaddr6 *ma, __rcu **map;
962
963 ASSERT_RTNL();
964
965 mutex_lock(&idev->mc_lock);
966 for (map = &idev->mc_list;
967 (ma = mc_dereference(*map, idev));
968 map = &ma->next) {
969 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
970 if (--ma->mca_users == 0) {
971 *map = ma->next;
972
973 igmp6_group_dropped(ma);
974 ip6_mc_clear_src(ma);
975 mutex_unlock(&idev->mc_lock);
976
977 ma_put(ma);
978 return 0;
979 }
980 mutex_unlock(&idev->mc_lock);
981 return 0;
982 }
983 }
984
985 mutex_unlock(&idev->mc_lock);
986 return -ENOENT;
987}
988
989int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
990{
991 struct inet6_dev *idev;
992 int err;
993
994 ASSERT_RTNL();
995
996 idev = __in6_dev_get(dev);
997 if (!idev)
998 err = -ENODEV;
999 else
1000 err = __ipv6_dev_mc_dec(idev, addr);
1001
1002 return err;
1003}
1004EXPORT_SYMBOL(ipv6_dev_mc_dec);
1005
1006/*
1007 * check if the interface/address pair is valid
1008 */
1009bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1010 const struct in6_addr *src_addr)
1011{
1012 struct inet6_dev *idev;
1013 struct ifmcaddr6 *mc;
1014 bool rv = false;
1015
1016 rcu_read_lock();
1017 idev = __in6_dev_get(dev);
1018 if (idev) {
1019 for_each_mc_rcu(idev, mc) {
1020 if (ipv6_addr_equal(&mc->mca_addr, group))
1021 break;
1022 }
1023 if (mc) {
1024 if (src_addr && !ipv6_addr_any(src_addr)) {
1025 struct ip6_sf_list *psf;
1026
1027 for_each_psf_rcu(mc, psf) {
1028 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1029 break;
1030 }
1031 if (psf)
1032 rv = psf->sf_count[MCAST_INCLUDE] ||
1033 psf->sf_count[MCAST_EXCLUDE] !=
1034 mc->mca_sfcount[MCAST_EXCLUDE];
1035 else
1036 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1037 } else
1038 rv = true; /* don't filter unspecified source */
1039 }
1040 }
1041 rcu_read_unlock();
1042 return rv;
1043}
1044
1045/* called with mc_lock */
1046static void mld_gq_start_work(struct inet6_dev *idev)
1047{
1048 unsigned long tv = prandom_u32() % idev->mc_maxdelay;
1049
1050 idev->mc_gq_running = 1;
1051 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1052 in6_dev_hold(idev);
1053}
1054
1055/* called with mc_lock */
1056static void mld_gq_stop_work(struct inet6_dev *idev)
1057{
1058 idev->mc_gq_running = 0;
1059 if (cancel_delayed_work(&idev->mc_gq_work))
1060 __in6_dev_put(idev);
1061}
1062
1063/* called with mc_lock */
1064static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1065{
1066 unsigned long tv = prandom_u32() % delay;
1067
1068 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1069 in6_dev_hold(idev);
1070}
1071
1072/* called with mc_lock */
1073static void mld_ifc_stop_work(struct inet6_dev *idev)
1074{
1075 idev->mc_ifc_count = 0;
1076 if (cancel_delayed_work(&idev->mc_ifc_work))
1077 __in6_dev_put(idev);
1078}
1079
1080/* called with mc_lock */
1081static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1082{
1083 unsigned long tv = prandom_u32() % delay;
1084
1085 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1086 in6_dev_hold(idev);
1087}
1088
1089static void mld_dad_stop_work(struct inet6_dev *idev)
1090{
1091 if (cancel_delayed_work(&idev->mc_dad_work))
1092 __in6_dev_put(idev);
1093}
1094
1095static void mld_query_stop_work(struct inet6_dev *idev)
1096{
1097 spin_lock_bh(&idev->mc_query_lock);
1098 if (cancel_delayed_work(&idev->mc_query_work))
1099 __in6_dev_put(idev);
1100 spin_unlock_bh(&idev->mc_query_lock);
1101}
1102
1103static void mld_report_stop_work(struct inet6_dev *idev)
1104{
1105 if (cancel_delayed_work_sync(&idev->mc_report_work))
1106 __in6_dev_put(idev);
1107}
1108
1109/*
 * MLD handling (the IPv6 counterpart of IGMP, carried in multicast ICMPv6 messages)
1111 * called with mc_lock
1112 */
1113static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1114{
1115 unsigned long delay = resptime;
1116
1117 /* Do not start work for these addresses */
1118 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1119 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1120 return;
1121
1122 if (cancel_delayed_work(&ma->mca_work)) {
1123 refcount_dec(&ma->mca_refcnt);
1124 delay = ma->mca_work.timer.expires - jiffies;
1125 }
1126
1127 if (delay >= resptime)
1128 delay = prandom_u32() % resptime;
1129
1130 if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
1131 refcount_inc(&ma->mca_refcnt);
1132 ma->mca_flags |= MAF_TIMER_RUNNING;
1133}
1134
1135/* mark EXCLUDE-mode sources
1136 * called with mc_lock
1137 */
1138static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1139 const struct in6_addr *srcs)
1140{
1141 struct ip6_sf_list *psf;
1142 int i, scount;
1143
1144 scount = 0;
1145 for_each_psf_mclock(pmc, psf) {
1146 if (scount == nsrcs)
1147 break;
1148 for (i = 0; i < nsrcs; i++) {
1149 /* skip inactive filters */
1150 if (psf->sf_count[MCAST_INCLUDE] ||
1151 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1152 psf->sf_count[MCAST_EXCLUDE])
1153 break;
1154 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1155 scount++;
1156 break;
1157 }
1158 }
1159 }
1160 pmc->mca_flags &= ~MAF_GSQUERY;
1161 if (scount == nsrcs) /* all sources excluded */
1162 return false;
1163 return true;
1164}
1165
1166/* called with mc_lock */
1167static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1168 const struct in6_addr *srcs)
1169{
1170 struct ip6_sf_list *psf;
1171 int i, scount;
1172
1173 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1174 return mld_xmarksources(pmc, nsrcs, srcs);
1175
1176 /* mark INCLUDE-mode sources */
1177
1178 scount = 0;
1179 for_each_psf_mclock(pmc, psf) {
1180 if (scount == nsrcs)
1181 break;
1182 for (i = 0; i < nsrcs; i++) {
1183 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1184 psf->sf_gsresp = 1;
1185 scount++;
1186 break;
1187 }
1188 }
1189 }
1190 if (!scount) {
1191 pmc->mca_flags &= ~MAF_GSQUERY;
1192 return false;
1193 }
1194 pmc->mca_flags |= MAF_GSQUERY;
1195 return true;
1196}
1197
1198static int mld_force_mld_version(const struct inet6_dev *idev)
1199{
	/* Normally, both are 0 here. If enforcement of a particular
	 * version is in use, the per-device setting takes lower
	 * precedence than the 'all' device setting
	 * (.../conf/all/force_mld_version).
	 */
1204
1205 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1206 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1207 else
1208 return idev->cnf.force_mld_version;
1209}
1210
1211static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1212{
1213 return mld_force_mld_version(idev) == 2;
1214}
1215
1216static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1217{
1218 return mld_force_mld_version(idev) == 1;
1219}
1220
1221static bool mld_in_v1_mode(const struct inet6_dev *idev)
1222{
1223 if (mld_in_v2_mode_only(idev))
1224 return false;
1225 if (mld_in_v1_mode_only(idev))
1226 return true;
1227 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1228 return true;
1229
1230 return false;
1231}
1232
1233static void mld_set_v1_mode(struct inet6_dev *idev)
1234{
1235 /* RFC3810, relevant sections:
1236 * - 9.1. Robustness Variable
1237 * - 9.2. Query Interval
1238 * - 9.3. Query Response Interval
1239 * - 9.12. Older Version Querier Present Timeout
1240 */
1241 unsigned long switchback;
1242
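	/* Older Version Querier Present Timeout (RFC3810, 9.12.):
	 * (Robustness Variable * Query Interval) + Query Response Interval
	 */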
1243 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1244
1245 idev->mc_v1_seen = jiffies + switchback;
1246}
1247
1248static void mld_update_qrv(struct inet6_dev *idev,
1249 const struct mld2_query *mlh2)
1250{
1251 /* RFC3810, relevant sections:
1252 * - 5.1.8. QRV (Querier's Robustness Variable)
1253 * - 9.1. Robustness Variable
1254 */
1255
1256 /* The value of the Robustness Variable MUST NOT be zero,
1257 * and SHOULD NOT be one. Catch this here if we ever run
1258 * into such a case in future.
1259 */
1260 const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1261 WARN_ON(idev->mc_qrv == 0);
1262
1263 if (mlh2->mld2q_qrv > 0)
1264 idev->mc_qrv = mlh2->mld2q_qrv;
1265
1266 if (unlikely(idev->mc_qrv < min_qrv)) {
1267 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1268 idev->mc_qrv, min_qrv);
1269 idev->mc_qrv = min_qrv;
1270 }
1271}
1272
1273static void mld_update_qi(struct inet6_dev *idev,
1274 const struct mld2_query *mlh2)
1275{
1276 /* RFC3810, relevant sections:
1277 * - 5.1.9. QQIC (Querier's Query Interval Code)
1278 * - 9.2. Query Interval
1279 * - 9.12. Older Version Querier Present Timeout
1280 * (the [Query Interval] in the last Query received)
1281 */
1282 unsigned long mc_qqi;
1283
1284 if (mlh2->mld2q_qqic < 128) {
1285 mc_qqi = mlh2->mld2q_qqic;
1286 } else {
1287 unsigned long mc_man, mc_exp;
1288
1289 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1290 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1291
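		/* Worked example: a QQIC of 0x80 has mant = 0 and exp = 0,
		 * so the interval decodes to (0x10 | 0) << (0 + 3) = 128 seconds.
		 */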
1292 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1293 }
1294
1295 idev->mc_qi = mc_qqi * HZ;
1296}
1297
1298static void mld_update_qri(struct inet6_dev *idev,
1299 const struct mld2_query *mlh2)
1300{
1301 /* RFC3810, relevant sections:
1302 * - 5.1.3. Maximum Response Code
1303 * - 9.3. Query Response Interval
1304 */
1305 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1306}
1307
1308static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1309 unsigned long *max_delay, bool v1_query)
1310{
1311 unsigned long mldv1_md;
1312
1313 /* Ignore v1 queries */
1314 if (mld_in_v2_mode_only(idev))
1315 return -EINVAL;
1316
1317 mldv1_md = ntohs(mld->mld_maxdelay);
1318
	/* When we are in MLDv1 fallback and an MLDv2 router starts up
	 * while still unaware of the current MLDv1 operation, the
	 * MRC == MRD mapping only works as long as the exponential
	 * encoding is not used (MLDv1 knows nothing of it).
	 *
	 * According to the RFC author, the MLDv2 implementations he is
	 * aware of all use an MRC < 32768 in their start-up queries.
	 *
	 * Thus, should we ever encounter anything larger than that,
	 * just assume the maximum possible within our reach.
	 */
1331 if (!v1_query)
1332 mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1333
1334 *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1335
1336 /* MLDv1 router present: we need to go into v1 mode *only*
1337 * when an MLDv1 query is received as per section 9.12. of
1338 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1339 * queries MUST be of exactly 24 octets.
1340 */
1341 if (v1_query)
1342 mld_set_v1_mode(idev);
1343
1344 /* cancel MLDv2 report work */
1345 mld_gq_stop_work(idev);
1346 /* cancel the interface change work */
1347 mld_ifc_stop_work(idev);
1348 /* clear deleted report items */
1349 mld_clear_delrec(idev);
1350
1351 return 0;
1352}
1353
1354static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1355 unsigned long *max_delay)
1356{
1357 *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1358
1359 mld_update_qrv(idev, mld);
1360 mld_update_qi(idev, mld);
1361 mld_update_qri(idev, mld);
1362
1363 idev->mc_maxdelay = *max_delay;
1364
1365 return 0;
1366}
1367
1368/* called with rcu_read_lock() */
1369int igmp6_event_query(struct sk_buff *skb)
1370{
1371 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1372
1373 if (!idev)
1374 return -EINVAL;
1375
1376 if (idev->dead) {
1377 kfree_skb(skb);
1378 return -ENODEV;
1379 }
1380
1381 spin_lock_bh(&idev->mc_query_lock);
1382 if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1383 __skb_queue_tail(&idev->mc_query_queue, skb);
1384 if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1385 in6_dev_hold(idev);
1386 }
1387 spin_unlock_bh(&idev->mc_query_lock);
1388
1389 return 0;
1390}
1391
1392static void __mld_query_work(struct sk_buff *skb)
1393{
1394 struct mld2_query *mlh2 = NULL;
1395 const struct in6_addr *group;
1396 unsigned long max_delay;
1397 struct inet6_dev *idev;
1398 struct ifmcaddr6 *ma;
1399 struct mld_msg *mld;
1400 int group_type;
1401 int mark = 0;
1402 int len, err;
1403
1404 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1405 goto kfree_skb;
1406
1407 /* compute payload length excluding extension headers */
1408 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1409 len -= skb_network_header_len(skb);
1410
1411 /* RFC3810 6.2
1412 * Upon reception of an MLD message that contains a Query, the node
1413 * checks if the source address of the message is a valid link-local
1414 * address, if the Hop Limit is set to 1, and if the Router Alert
1415 * option is present in the Hop-By-Hop Options header of the IPv6
1416 * packet. If any of these checks fails, the packet is dropped.
1417 */
1418 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1419 ipv6_hdr(skb)->hop_limit != 1 ||
1420 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1421 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1422 goto kfree_skb;
1423
1424 idev = in6_dev_get(skb->dev);
1425 if (!idev)
1426 goto kfree_skb;
1427
1428 mld = (struct mld_msg *)icmp6_hdr(skb);
1429 group = &mld->mld_mca;
1430 group_type = ipv6_addr_type(group);
1431
1432 if (group_type != IPV6_ADDR_ANY &&
1433 !(group_type&IPV6_ADDR_MULTICAST))
1434 goto out;
1435
1436 if (len < MLD_V1_QUERY_LEN) {
1437 goto out;
1438 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1439 err = mld_process_v1(idev, mld, &max_delay,
1440 len == MLD_V1_QUERY_LEN);
1441 if (err < 0)
1442 goto out;
1443 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1444 int srcs_offset = sizeof(struct mld2_query) -
1445 sizeof(struct icmp6hdr);
1446
1447 if (!pskb_may_pull(skb, srcs_offset))
1448 goto out;
1449
1450 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1451
1452 err = mld_process_v2(idev, mlh2, &max_delay);
1453 if (err < 0)
1454 goto out;
1455
1456 if (group_type == IPV6_ADDR_ANY) { /* general query */
1457 if (mlh2->mld2q_nsrcs)
1458 goto out; /* no sources allowed */
1459
1460 mld_gq_start_work(idev);
1461 goto out;
1462 }
1463 /* mark sources to include, if group & source-specific */
1464 if (mlh2->mld2q_nsrcs != 0) {
1465 if (!pskb_may_pull(skb, srcs_offset +
1466 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1467 goto out;
1468
1469 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1470 mark = 1;
1471 }
1472 } else {
1473 goto out;
1474 }
1475
1476 if (group_type == IPV6_ADDR_ANY) {
1477 for_each_mc_mclock(idev, ma) {
1478 igmp6_group_queried(ma, max_delay);
1479 }
1480 } else {
1481 for_each_mc_mclock(idev, ma) {
1482 if (!ipv6_addr_equal(group, &ma->mca_addr))
1483 continue;
1484 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1485 /* gsquery <- gsquery && mark */
1486 if (!mark)
1487 ma->mca_flags &= ~MAF_GSQUERY;
1488 } else {
1489 /* gsquery <- mark */
1490 if (mark)
1491 ma->mca_flags |= MAF_GSQUERY;
1492 else
1493 ma->mca_flags &= ~MAF_GSQUERY;
1494 }
1495 if (!(ma->mca_flags & MAF_GSQUERY) ||
1496 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1497 igmp6_group_queried(ma, max_delay);
1498 break;
1499 }
1500 }
1501
1502out:
1503 in6_dev_put(idev);
1504kfree_skb:
1505 consume_skb(skb);
1506}
1507
1508static void mld_query_work(struct work_struct *work)
1509{
1510 struct inet6_dev *idev = container_of(to_delayed_work(work),
1511 struct inet6_dev,
1512 mc_query_work);
1513 struct sk_buff_head q;
1514 struct sk_buff *skb;
1515 bool rework = false;
1516 int cnt = 0;
1517
1518 skb_queue_head_init(&q);
1519
1520 spin_lock_bh(&idev->mc_query_lock);
1521 while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1522 __skb_queue_tail(&q, skb);
1523
1524 if (++cnt >= MLD_MAX_QUEUE) {
1525 rework = true;
1526 schedule_delayed_work(&idev->mc_query_work, 0);
1527 break;
1528 }
1529 }
1530 spin_unlock_bh(&idev->mc_query_lock);
1531
1532 mutex_lock(&idev->mc_lock);
1533 while ((skb = __skb_dequeue(&q)))
1534 __mld_query_work(skb);
1535 mutex_unlock(&idev->mc_lock);
1536
1537 if (!rework)
1538 in6_dev_put(idev);
1539}
1540
1541/* called with rcu_read_lock() */
1542int igmp6_event_report(struct sk_buff *skb)
1543{
1544 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1545
1546 if (!idev)
1547 return -EINVAL;
1548
1549 if (idev->dead) {
1550 kfree_skb(skb);
1551 return -ENODEV;
1552 }
1553
1554 spin_lock_bh(&idev->mc_report_lock);
1555 if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1556 __skb_queue_tail(&idev->mc_report_queue, skb);
1557 if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1558 in6_dev_hold(idev);
1559 }
1560 spin_unlock_bh(&idev->mc_report_lock);
1561
1562 return 0;
1563}
1564
1565static void __mld_report_work(struct sk_buff *skb)
1566{
1567 struct inet6_dev *idev;
1568 struct ifmcaddr6 *ma;
1569 struct mld_msg *mld;
1570 int addr_type;
1571
1572 /* Our own report looped back. Ignore it. */
1573 if (skb->pkt_type == PACKET_LOOPBACK)
1574 goto kfree_skb;
1575
1576 /* send our report if the MC router may not have heard this report */
1577 if (skb->pkt_type != PACKET_MULTICAST &&
1578 skb->pkt_type != PACKET_BROADCAST)
1579 goto kfree_skb;
1580
1581 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1582 goto kfree_skb;
1583
1584 mld = (struct mld_msg *)icmp6_hdr(skb);
1585
	/* Drop reports whose source is neither link-local nor unspecified */
1587 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1588 if (addr_type != IPV6_ADDR_ANY &&
1589 !(addr_type&IPV6_ADDR_LINKLOCAL))
1590 goto kfree_skb;
1591
1592 idev = in6_dev_get(skb->dev);
1593 if (!idev)
1594 goto kfree_skb;
1595
1596 /*
1597 * Cancel the work for this group
1598 */
1599
1600 for_each_mc_mclock(idev, ma) {
1601 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1602 if (cancel_delayed_work(&ma->mca_work))
1603 refcount_dec(&ma->mca_refcnt);
1604 ma->mca_flags &= ~(MAF_LAST_REPORTER |
1605 MAF_TIMER_RUNNING);
1606 break;
1607 }
1608 }
1609
1610 in6_dev_put(idev);
1611kfree_skb:
1612 consume_skb(skb);
1613}
1614
1615static void mld_report_work(struct work_struct *work)
1616{
1617 struct inet6_dev *idev = container_of(to_delayed_work(work),
1618 struct inet6_dev,
1619 mc_report_work);
1620 struct sk_buff_head q;
1621 struct sk_buff *skb;
1622 bool rework = false;
1623 int cnt = 0;
1624
1625 skb_queue_head_init(&q);
1626 spin_lock_bh(&idev->mc_report_lock);
1627 while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1628 __skb_queue_tail(&q, skb);
1629
1630 if (++cnt >= MLD_MAX_QUEUE) {
1631 rework = true;
1632 schedule_delayed_work(&idev->mc_report_work, 0);
1633 break;
1634 }
1635 }
1636 spin_unlock_bh(&idev->mc_report_lock);
1637
1638 mutex_lock(&idev->mc_lock);
1639 while ((skb = __skb_dequeue(&q)))
1640 __mld_report_work(skb);
1641 mutex_unlock(&idev->mc_lock);
1642
1643 if (!rework)
1644 in6_dev_put(idev);
1645}
1646
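/* Decide whether source @psf should be listed in an MLDv2 record of the
 * given @type for group @pmc, taking into account whether the group
 * (@gdeleted) or the source (@sdeleted) sits on a tomb list.
 * Called with mc_lock held.
 */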
1647static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1648 int gdeleted, int sdeleted)
1649{
1650 switch (type) {
1651 case MLD2_MODE_IS_INCLUDE:
1652 case MLD2_MODE_IS_EXCLUDE:
1653 if (gdeleted || sdeleted)
1654 return false;
1655 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1656 if (pmc->mca_sfmode == MCAST_INCLUDE)
1657 return true;
1658 /* don't include if this source is excluded
1659 * in all filters
1660 */
1661 if (psf->sf_count[MCAST_INCLUDE])
1662 return type == MLD2_MODE_IS_INCLUDE;
1663 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1664 psf->sf_count[MCAST_EXCLUDE];
1665 }
1666 return false;
1667 case MLD2_CHANGE_TO_INCLUDE:
1668 if (gdeleted || sdeleted)
1669 return false;
1670 return psf->sf_count[MCAST_INCLUDE] != 0;
1671 case MLD2_CHANGE_TO_EXCLUDE:
1672 if (gdeleted || sdeleted)
1673 return false;
1674 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1675 psf->sf_count[MCAST_INCLUDE])
1676 return false;
1677 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1678 psf->sf_count[MCAST_EXCLUDE];
1679 case MLD2_ALLOW_NEW_SOURCES:
1680 if (gdeleted || !psf->sf_crcount)
1681 return false;
1682 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1683 case MLD2_BLOCK_OLD_SOURCES:
1684 if (pmc->mca_sfmode == MCAST_INCLUDE)
1685 return gdeleted || (psf->sf_crcount && sdeleted);
1686 return psf->sf_crcount && !gdeleted && !sdeleted;
1687 }
1688 return false;
1689}
1690
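/* Count how many of @pmc's sources belong in a record of the given type;
 * used by grec_size() to size the group record. Called with mc_lock held.
 */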
1691static int
1692mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1693{
1694 struct ip6_sf_list *psf;
1695 int scount = 0;
1696
1697 for_each_psf_mclock(pmc, psf) {
1698 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1699 continue;
1700 scount++;
1701 }
1702 return scount;
1703}
1704
1705static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1706 struct net_device *dev,
1707 const struct in6_addr *saddr,
1708 const struct in6_addr *daddr,
1709 int proto, int len)
1710{
1711 struct ipv6hdr *hdr;
1712
1713 skb->protocol = htons(ETH_P_IPV6);
1714 skb->dev = dev;
1715
1716 skb_reset_network_header(skb);
1717 skb_put(skb, sizeof(struct ipv6hdr));
1718 hdr = ipv6_hdr(skb);
1719
1720 ip6_flow_hdr(hdr, 0, 0);
1721
1722 hdr->payload_len = htons(len);
1723 hdr->nexthdr = proto;
1724 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1725
1726 hdr->saddr = *saddr;
1727 hdr->daddr = *daddr;
1728}
1729
1730static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1731{
1732 u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
1733 2, 0, 0, IPV6_TLV_PADN, 0 };
1734 struct net_device *dev = idev->dev;
1735 int hlen = LL_RESERVED_SPACE(dev);
1736 int tlen = dev->needed_tailroom;
1737 struct net *net = dev_net(dev);
1738 const struct in6_addr *saddr;
1739 struct in6_addr addr_buf;
1740 struct mld2_report *pmr;
1741 struct sk_buff *skb;
1742 unsigned int size;
1743 struct sock *sk;
1744 int err;
1745
1746 sk = net->ipv6.igmp_sk;
1747 /* we assume size > sizeof(ra) here
1748 * Also try to not allocate high-order pages for big MTU
1749 */
1750 size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
1751 skb = sock_alloc_send_skb(sk, size, 1, &err);
1752 if (!skb)
1753 return NULL;
1754
1755 skb->priority = TC_PRIO_CONTROL;
1756 skb_reserve(skb, hlen);
1757 skb_tailroom_reserve(skb, mtu, tlen);
1758
1759 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1760 /* <draft-ietf-magma-mld-source-05.txt>:
1761 * use unspecified address as the source address
1762 * when a valid link-local address is not available.
1763 */
1764 saddr = &in6addr_any;
1765 } else
1766 saddr = &addr_buf;
1767
1768 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1769
1770 skb_put_data(skb, ra, sizeof(ra));
1771
1772 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1773 skb_put(skb, sizeof(*pmr));
1774 pmr = (struct mld2_report *)skb_transport_header(skb);
1775 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1776 pmr->mld2r_resv1 = 0;
1777 pmr->mld2r_cksum = 0;
1778 pmr->mld2r_resv2 = 0;
1779 pmr->mld2r_ngrec = 0;
1780 return skb;
1781}
1782
1783static void mld_sendpack(struct sk_buff *skb)
1784{
1785 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1786 struct mld2_report *pmr =
1787 (struct mld2_report *)skb_transport_header(skb);
1788 int payload_len, mldlen;
1789 struct inet6_dev *idev;
1790 struct net *net = dev_net(skb->dev);
1791 int err;
1792 struct flowi6 fl6;
1793 struct dst_entry *dst;
1794
1795 rcu_read_lock();
1796 idev = __in6_dev_get(skb->dev);
1797 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1798
1799 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1800 sizeof(*pip6);
1801 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1802 pip6->payload_len = htons(payload_len);
1803
1804 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1805 IPPROTO_ICMPV6,
1806 csum_partial(skb_transport_header(skb),
1807 mldlen, 0));
1808
1809 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1810 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1811 skb->dev->ifindex);
1812 dst = icmp6_dst_alloc(skb->dev, &fl6);
1813
1814 err = 0;
1815 if (IS_ERR(dst)) {
1816 err = PTR_ERR(dst);
1817 dst = NULL;
1818 }
1819 skb_dst_set(skb, dst);
1820 if (err)
1821 goto err_out;
1822
1823 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1824 net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1825 dst_output);
1826out:
1827 if (!err) {
1828 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1829 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1830 } else {
1831 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1832 }
1833
1834 rcu_read_unlock();
1835 return;
1836
1837err_out:
1838 kfree_skb(skb);
1839 goto out;
1840}
1841
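/* Worst-case size of a single group record for @pmc: the fixed record
 * header plus one 16-byte IPv6 address per source that would be reported.
 */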
1842static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1843{
1844 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1845}
1846
1847static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1848 int type, struct mld2_grec **ppgr, unsigned int mtu)
1849{
1850 struct mld2_report *pmr;
1851 struct mld2_grec *pgr;
1852
1853 if (!skb) {
1854 skb = mld_newpack(pmc->idev, mtu);
1855 if (!skb)
1856 return NULL;
1857 }
1858 pgr = skb_put(skb, sizeof(struct mld2_grec));
1859 pgr->grec_type = type;
1860 pgr->grec_auxwords = 0;
1861 pgr->grec_nsrcs = 0;
1862 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1863 pmr = (struct mld2_report *)skb_transport_header(skb);
1864 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1865 *ppgr = pgr;
1866 return skb;
1867}
1868
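/* Room left in the pending report skb for more records (0 if no skb yet) */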
1869#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1870
1871/* called with mc_lock */
1872static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1873 int type, int gdeleted, int sdeleted,
1874 int crsend)
1875{
1876 struct ip6_sf_list *psf, *psf_prev, *psf_next;
1877 int scount, stotal, first, isquery, truncate;
1878 struct ip6_sf_list __rcu **psf_list;
1879 struct inet6_dev *idev = pmc->idev;
1880 struct net_device *dev = idev->dev;
1881 struct mld2_grec *pgr = NULL;
1882 struct mld2_report *pmr;
1883 unsigned int mtu;
1884
1885 if (pmc->mca_flags & MAF_NOREPORT)
1886 return skb;
1887
1888 mtu = READ_ONCE(dev->mtu);
1889 if (mtu < IPV6_MIN_MTU)
1890 return skb;
1891
1892 isquery = type == MLD2_MODE_IS_INCLUDE ||
1893 type == MLD2_MODE_IS_EXCLUDE;
1894 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1895 type == MLD2_CHANGE_TO_EXCLUDE;
1896
1897 stotal = scount = 0;
1898
1899 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1900
1901 if (!rcu_access_pointer(*psf_list))
1902 goto empty_source;
1903
1904 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1905
1906 /* EX and TO_EX get a fresh packet, if needed */
1907 if (truncate) {
1908 if (pmr && pmr->mld2r_ngrec &&
1909 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1910 if (skb)
1911 mld_sendpack(skb);
1912 skb = mld_newpack(idev, mtu);
1913 }
1914 }
1915 first = 1;
1916 psf_prev = NULL;
1917 for (psf = mc_dereference(*psf_list, idev);
1918 psf;
1919 psf = psf_next) {
1920 struct in6_addr *psrc;
1921
1922 psf_next = mc_dereference(psf->sf_next, idev);
1923
1924 if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1925 psf_prev = psf;
1926 continue;
1927 }
1928
1929 /* Based on RFC3810 6.1. Should not send source-list change
1930 * records when there is a filter mode change.
1931 */
1932 if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1933 (!gdeleted && pmc->mca_crcount)) &&
1934 (type == MLD2_ALLOW_NEW_SOURCES ||
1935 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1936 goto decrease_sf_crcount;
1937
1938 /* clear marks on query responses */
1939 if (isquery)
1940 psf->sf_gsresp = 0;
1941
1942 if (AVAILABLE(skb) < sizeof(*psrc) +
1943 first*sizeof(struct mld2_grec)) {
1944 if (truncate && !first)
1945 break; /* truncate these */
1946 if (pgr)
1947 pgr->grec_nsrcs = htons(scount);
1948 if (skb)
1949 mld_sendpack(skb);
1950 skb = mld_newpack(idev, mtu);
1951 first = 1;
1952 scount = 0;
1953 }
1954 if (first) {
1955 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1956 first = 0;
1957 }
1958 if (!skb)
1959 return NULL;
1960 psrc = skb_put(skb, sizeof(*psrc));
1961 *psrc = psf->sf_addr;
1962 scount++; stotal++;
1963 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1964 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1965decrease_sf_crcount:
1966 psf->sf_crcount--;
1967 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1968 if (psf_prev)
1969 rcu_assign_pointer(psf_prev->sf_next,
1970 mc_dereference(psf->sf_next, idev));
1971 else
1972 rcu_assign_pointer(*psf_list,
1973 mc_dereference(psf->sf_next, idev));
1974 kfree_rcu(psf, rcu);
1975 continue;
1976 }
1977 }
1978 psf_prev = psf;
1979 }
1980
1981empty_source:
1982 if (!stotal) {
1983 if (type == MLD2_ALLOW_NEW_SOURCES ||
1984 type == MLD2_BLOCK_OLD_SOURCES)
1985 return skb;
1986 if (pmc->mca_crcount || isquery || crsend) {
1987 /* make sure we have room for group header */
1988 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1989 mld_sendpack(skb);
1990 skb = NULL; /* add_grhead will get a new one */
1991 }
1992 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1993 }
1994 }
1995 if (pgr)
1996 pgr->grec_nsrcs = htons(scount);
1997
1998 if (isquery)
1999 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
2000 return skb;
2001}
2002
2003/* called with mc_lock */
2004static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2005{
2006 struct sk_buff *skb = NULL;
2007 int type;
2008
2009 if (!pmc) {
2010 for_each_mc_mclock(idev, pmc) {
2011 if (pmc->mca_flags & MAF_NOREPORT)
2012 continue;
2013 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2014 type = MLD2_MODE_IS_EXCLUDE;
2015 else
2016 type = MLD2_MODE_IS_INCLUDE;
2017 skb = add_grec(skb, pmc, type, 0, 0, 0);
2018 }
2019 } else {
2020 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2021 type = MLD2_MODE_IS_EXCLUDE;
2022 else
2023 type = MLD2_MODE_IS_INCLUDE;
2024 skb = add_grec(skb, pmc, type, 0, 0, 0);
2025 }
2026 if (skb)
2027 mld_sendpack(skb);
2028}
2029
2030/*
2031 * remove zero-count source records from a source filter list
2032 * called with mc_lock
2033 */
2034static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2035{
2036 struct ip6_sf_list *psf_prev, *psf_next, *psf;
2037
2038 psf_prev = NULL;
2039 for (psf = mc_dereference(*ppsf, idev);
2040 psf;
2041 psf = psf_next) {
2042 psf_next = mc_dereference(psf->sf_next, idev);
2043 if (psf->sf_crcount == 0) {
2044 if (psf_prev)
2045 rcu_assign_pointer(psf_prev->sf_next,
2046 mc_dereference(psf->sf_next, idev));
2047 else
2048 rcu_assign_pointer(*ppsf,
2049 mc_dereference(psf->sf_next, idev));
2050 kfree_rcu(psf, rcu);
2051 } else {
2052 psf_prev = psf;
2053 }
2054 }
2055}
2056
2057/* called with mc_lock */
2058static void mld_send_cr(struct inet6_dev *idev)
2059{
2060 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
2061 struct sk_buff *skb = NULL;
2062 int type, dtype;
2063
2064 /* deleted MCA's */
2065 pmc_prev = NULL;
2066 for (pmc = mc_dereference(idev->mc_tomb, idev);
2067 pmc;
2068 pmc = pmc_next) {
2069 pmc_next = mc_dereference(pmc->next, idev);
2070 if (pmc->mca_sfmode == MCAST_INCLUDE) {
2071 type = MLD2_BLOCK_OLD_SOURCES;
2072 dtype = MLD2_BLOCK_OLD_SOURCES;
2073 skb = add_grec(skb, pmc, type, 1, 0, 0);
2074 skb = add_grec(skb, pmc, dtype, 1, 1, 0);
2075 }
2076 if (pmc->mca_crcount) {
2077 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
2078 type = MLD2_CHANGE_TO_INCLUDE;
2079 skb = add_grec(skb, pmc, type, 1, 0, 0);
2080 }
2081 pmc->mca_crcount--;
2082 if (pmc->mca_crcount == 0) {
2083 mld_clear_zeros(&pmc->mca_tomb, idev);
2084 mld_clear_zeros(&pmc->mca_sources, idev);
2085 }
2086 }
2087 if (pmc->mca_crcount == 0 &&
2088 !rcu_access_pointer(pmc->mca_tomb) &&
2089 !rcu_access_pointer(pmc->mca_sources)) {
2090 if (pmc_prev)
2091 rcu_assign_pointer(pmc_prev->next, pmc_next);
2092 else
2093 rcu_assign_pointer(idev->mc_tomb, pmc_next);
2094 in6_dev_put(pmc->idev);
2095 kfree_rcu(pmc, rcu);
2096 } else
2097 pmc_prev = pmc;
2098 }
2099
2100 /* change recs */
2101 for_each_mc_mclock(idev, pmc) {
2102 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2103 type = MLD2_BLOCK_OLD_SOURCES;
2104 dtype = MLD2_ALLOW_NEW_SOURCES;
2105 } else {
2106 type = MLD2_ALLOW_NEW_SOURCES;
2107 dtype = MLD2_BLOCK_OLD_SOURCES;
2108 }
2109 skb = add_grec(skb, pmc, type, 0, 0, 0);
2110 skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
2111
2112 /* filter mode changes */
2113 if (pmc->mca_crcount) {
2114 if (pmc->mca_sfmode == MCAST_EXCLUDE)
2115 type = MLD2_CHANGE_TO_EXCLUDE;
2116 else
2117 type = MLD2_CHANGE_TO_INCLUDE;
2118 skb = add_grec(skb, pmc, type, 0, 0, 0);
2119 pmc->mca_crcount--;
2120 }
2121 }
2122 if (!skb)
2123 return;
2124 (void) mld_sendpack(skb);
2125}
2126
2127static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
2128{
2129 struct net *net = dev_net(dev);
2130 struct sock *sk = net->ipv6.igmp_sk;
2131 struct inet6_dev *idev;
2132 struct sk_buff *skb;
2133 struct mld_msg *hdr;
2134 const struct in6_addr *snd_addr, *saddr;
2135 struct in6_addr addr_buf;
2136 int hlen = LL_RESERVED_SPACE(dev);
2137 int tlen = dev->needed_tailroom;
2138 int err, len, payload_len, full_len;
2139 u8 ra[8] = { IPPROTO_ICMPV6, 0,
2140 IPV6_TLV_ROUTERALERT, 2, 0, 0,
2141 IPV6_TLV_PADN, 0 };
2142 struct flowi6 fl6;
2143 struct dst_entry *dst;
2144
2145 if (type == ICMPV6_MGM_REDUCTION)
2146 snd_addr = &in6addr_linklocal_allrouters;
2147 else
2148 snd_addr = addr;
2149
2150 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2151 payload_len = len + sizeof(ra);
2152 full_len = sizeof(struct ipv6hdr) + payload_len;
2153
2154 rcu_read_lock();
2155 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
2156 IPSTATS_MIB_OUT, full_len);
2157 rcu_read_unlock();
2158
2159 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
2160
2161 if (!skb) {
2162 rcu_read_lock();
2163 IP6_INC_STATS(net, __in6_dev_get(dev),
2164 IPSTATS_MIB_OUTDISCARDS);
2165 rcu_read_unlock();
2166 return;
2167 }
2168 skb->priority = TC_PRIO_CONTROL;
2169 skb_reserve(skb, hlen);
2170
2171 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2172 /* <draft-ietf-magma-mld-source-05.txt>:
2173 * use unspecified address as the source address
2174 * when a valid link-local address is not available.
2175 */
2176 saddr = &in6addr_any;
2177 } else
2178 saddr = &addr_buf;
2179
2180 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2181
2182 skb_put_data(skb, ra, sizeof(ra));
2183
2184 hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2185 hdr->mld_type = type;
2186 hdr->mld_mca = *addr;
2187
2188 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2189 IPPROTO_ICMPV6,
2190 csum_partial(hdr, len, 0));
2191
2192 rcu_read_lock();
2193 idev = __in6_dev_get(skb->dev);
2194
2195 icmpv6_flow_init(sk, &fl6, type,
2196 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2197 skb->dev->ifindex);
2198 dst = icmp6_dst_alloc(skb->dev, &fl6);
2199 if (IS_ERR(dst)) {
2200 err = PTR_ERR(dst);
2201 goto err_out;
2202 }
2203
2204 skb_dst_set(skb, dst);
2205 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2206 net, sk, skb, NULL, skb->dev,
2207 dst_output);
2208out:
2209 if (!err) {
2210 ICMP6MSGOUT_INC_STATS(net, idev, type);
2211 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2212 } else
2213 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2214
2215 rcu_read_unlock();
2216 return;
2217
2218err_out:
2219 kfree_skb(skb);
2220 goto out;
2221}
2222
2223/* called with mc_lock */
2224static void mld_send_initial_cr(struct inet6_dev *idev)
2225{
2226 struct sk_buff *skb;
2227 struct ifmcaddr6 *pmc;
2228 int type;
2229
2230 if (mld_in_v1_mode(idev))
2231 return;
2232
2233 skb = NULL;
2234 for_each_mc_mclock(idev, pmc) {
2235 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2236 type = MLD2_CHANGE_TO_EXCLUDE;
2237 else
2238 type = MLD2_ALLOW_NEW_SOURCES;
2239 skb = add_grec(skb, pmc, type, 0, 0, 1);
2240 }
2241 if (skb)
2242 mld_sendpack(skb);
2243}
2244
2245void ipv6_mc_dad_complete(struct inet6_dev *idev)
2246{
2247 mutex_lock(&idev->mc_lock);
2248 idev->mc_dad_count = idev->mc_qrv;
2249 if (idev->mc_dad_count) {
2250 mld_send_initial_cr(idev);
2251 idev->mc_dad_count--;
2252 if (idev->mc_dad_count)
2253 mld_dad_start_work(idev,
2254 unsolicited_report_interval(idev));
2255 }
2256 mutex_unlock(&idev->mc_lock);
2257}
2258
2259static void mld_dad_work(struct work_struct *work)
2260{
2261 struct inet6_dev *idev = container_of(to_delayed_work(work),
2262 struct inet6_dev,
2263 mc_dad_work);
2264 mutex_lock(&idev->mc_lock);
2265 mld_send_initial_cr(idev);
2266 if (idev->mc_dad_count) {
2267 idev->mc_dad_count--;
2268 if (idev->mc_dad_count)
2269 mld_dad_start_work(idev,
2270 unsolicited_report_interval(idev));
2271 }
2272 mutex_unlock(&idev->mc_lock);
2273 in6_dev_put(idev);
2274}
2275
2276/* called with mc_lock */
2277static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2278 const struct in6_addr *psfsrc)
2279{
2280 struct ip6_sf_list *psf, *psf_prev;
2281 int rv = 0;
2282
2283 psf_prev = NULL;
2284 for_each_psf_mclock(pmc, psf) {
2285 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2286 break;
2287 psf_prev = psf;
2288 }
2289 if (!psf || psf->sf_count[sfmode] == 0) {
2290 /* source filter not found, or count wrong => bug */
2291 return -ESRCH;
2292 }
2293 psf->sf_count[sfmode]--;
2294 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2295 struct inet6_dev *idev = pmc->idev;
2296
2297 /* no more filters for this source */
2298 if (psf_prev)
2299 rcu_assign_pointer(psf_prev->sf_next,
2300 mc_dereference(psf->sf_next, idev));
2301 else
2302 rcu_assign_pointer(pmc->mca_sources,
2303 mc_dereference(psf->sf_next, idev));
2304
2305 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2306 !mld_in_v1_mode(idev)) {
2307 psf->sf_crcount = idev->mc_qrv;
2308 rcu_assign_pointer(psf->sf_next,
2309 mc_dereference(pmc->mca_tomb, idev));
2310 rcu_assign_pointer(pmc->mca_tomb, psf);
2311 rv = 1;
2312 } else {
2313 kfree_rcu(psf, rcu);
2314 }
2315 }
2316 return rv;
2317}
2318
2319/* called with mc_lock */
2320static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2321 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2322 int delta)
2323{
2324 struct ifmcaddr6 *pmc;
2325 int changerec = 0;
2326 int i, err;
2327
2328 if (!idev)
2329 return -ENODEV;
2330
2331 for_each_mc_mclock(idev, pmc) {
2332 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2333 break;
2334 }
2335 if (!pmc)
2336 return -ESRCH;
2337
2338 sf_markstate(pmc);
2339 if (!delta) {
2340 if (!pmc->mca_sfcount[sfmode])
2341 return -EINVAL;
2342
2343 pmc->mca_sfcount[sfmode]--;
2344 }
2345 err = 0;
2346 for (i = 0; i < sfcount; i++) {
2347 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2348
2349 changerec |= rv > 0;
2350 if (!err && rv < 0)
2351 err = rv;
2352 }
2353 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2354 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2355 pmc->mca_sfcount[MCAST_INCLUDE]) {
2356 struct ip6_sf_list *psf;
2357
2358 /* filter mode change */
2359 pmc->mca_sfmode = MCAST_INCLUDE;
2360 pmc->mca_crcount = idev->mc_qrv;
2361 idev->mc_ifc_count = pmc->mca_crcount;
2362 for_each_psf_mclock(pmc, psf)
2363 psf->sf_crcount = 0;
2364 mld_ifc_event(pmc->idev);
2365 } else if (sf_setstate(pmc) || changerec) {
2366 mld_ifc_event(pmc->idev);
2367 }
2368
2369 return err;
2370}
2371
2372/*
2373 * Add multicast single-source filter to the interface list
2374 * called with mc_lock
2375 */
2376static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2377 const struct in6_addr *psfsrc)
2378{
2379 struct ip6_sf_list *psf, *psf_prev;
2380
2381 psf_prev = NULL;
2382 for_each_psf_mclock(pmc, psf) {
2383 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2384 break;
2385 psf_prev = psf;
2386 }
2387 if (!psf) {
2388 psf = kzalloc(sizeof(*psf), GFP_KERNEL);
2389 if (!psf)
2390 return -ENOBUFS;
2391
2392 psf->sf_addr = *psfsrc;
2393 if (psf_prev) {
2394 rcu_assign_pointer(psf_prev->sf_next, psf);
2395 } else {
2396 rcu_assign_pointer(pmc->mca_sources, psf);
2397 }
2398 }
2399 psf->sf_count[sfmode]++;
2400 return 0;
2401}
2402
2403/* called with mc_lock */
2404static void sf_markstate(struct ifmcaddr6 *pmc)
2405{
2406 struct ip6_sf_list *psf;
2407 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2408
2409 for_each_psf_mclock(pmc, psf) {
2410 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2411 psf->sf_oldin = mca_xcount ==
2412 psf->sf_count[MCAST_EXCLUDE] &&
2413 !psf->sf_count[MCAST_INCLUDE];
2414 } else {
2415 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2416 }
2417 }
2418}
2419
2420/* called with mc_lock */
2421static int sf_setstate(struct ifmcaddr6 *pmc)
2422{
2423 struct ip6_sf_list *psf, *dpsf;
2424 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2425 int qrv = pmc->idev->mc_qrv;
2426 int new_in, rv;
2427
2428 rv = 0;
2429 for_each_psf_mclock(pmc, psf) {
2430 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2431 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2432 !psf->sf_count[MCAST_INCLUDE];
2433 } else
2434 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2435 if (new_in) {
2436 if (!psf->sf_oldin) {
2437 struct ip6_sf_list *prev = NULL;
2438
2439 for_each_psf_tomb(pmc, dpsf) {
2440 if (ipv6_addr_equal(&dpsf->sf_addr,
2441 &psf->sf_addr))
2442 break;
2443 prev = dpsf;
2444 }
2445 if (dpsf) {
2446 if (prev)
2447 rcu_assign_pointer(prev->sf_next,
2448 mc_dereference(dpsf->sf_next,
2449 pmc->idev));
2450 else
2451 rcu_assign_pointer(pmc->mca_tomb,
2452 mc_dereference(dpsf->sf_next,
2453 pmc->idev));
2454 kfree_rcu(dpsf, rcu);
2455 }
2456 psf->sf_crcount = qrv;
2457 rv++;
2458 }
2459 } else if (psf->sf_oldin) {
2460 psf->sf_crcount = 0;
2461 /*
2462 * add or update "delete" records if an active filter
2463 * is now inactive
2464 */
2465
2466 for_each_psf_tomb(pmc, dpsf)
2467 if (ipv6_addr_equal(&dpsf->sf_addr,
2468 &psf->sf_addr))
2469 break;
2470 if (!dpsf) {
2471 dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
2472 if (!dpsf)
2473 continue;
2474 *dpsf = *psf;
2475 rcu_assign_pointer(dpsf->sf_next,
2476 mc_dereference(pmc->mca_tomb, pmc->idev));
2477 rcu_assign_pointer(pmc->mca_tomb, dpsf);
2478 }
2479 dpsf->sf_crcount = qrv;
2480 rv++;
2481 }
2482 }
2483 return rv;
2484}
2485
2486/*
2487 * Add multicast source filter list to the interface list
2488 * called with mc_lock
2489 */
2490static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2491 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2492 int delta)
2493{
2494 struct ifmcaddr6 *pmc;
2495 int isexclude;
2496 int i, err;
2497
2498 if (!idev)
2499 return -ENODEV;
2500
2501 for_each_mc_mclock(idev, pmc) {
2502 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2503 break;
2504 }
2505 if (!pmc)
2506 return -ESRCH;
2507
2508 sf_markstate(pmc);
2509 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2510 if (!delta)
2511 pmc->mca_sfcount[sfmode]++;
2512 err = 0;
2513 for (i = 0; i < sfcount; i++) {
2514 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2515 if (err)
2516 break;
2517 }
2518 if (err) {
2519 int j;
2520
2521 if (!delta)
2522 pmc->mca_sfcount[sfmode]--;
2523 for (j = 0; j < i; j++)
2524 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2525 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2526 struct ip6_sf_list *psf;
2527
2528 /* filter mode change */
2529 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2530 pmc->mca_sfmode = MCAST_EXCLUDE;
2531 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2532 pmc->mca_sfmode = MCAST_INCLUDE;
2533 /* else no filters; keep old mode for reports */
2534
2535 pmc->mca_crcount = idev->mc_qrv;
2536 idev->mc_ifc_count = pmc->mca_crcount;
2537 for_each_psf_mclock(pmc, psf)
2538 psf->sf_crcount = 0;
2539 mld_ifc_event(idev);
2540 } else if (sf_setstate(pmc)) {
2541 mld_ifc_event(idev);
2542 }
2543 return err;
2544}
2545
2546/* called with mc_lock */
2547static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2548{
2549 struct ip6_sf_list *psf, *nextpsf;
2550
2551 for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2552 psf;
2553 psf = nextpsf) {
2554 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2555 kfree_rcu(psf, rcu);
2556 }
2557 RCU_INIT_POINTER(pmc->mca_tomb, NULL);
2558 for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2559 psf;
2560 psf = nextpsf) {
2561 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2562 kfree_rcu(psf, rcu);
2563 }
2564 RCU_INIT_POINTER(pmc->mca_sources, NULL);
2565 pmc->mca_sfmode = MCAST_EXCLUDE;
2566 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2567 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2568}
2569
2570/* called with mc_lock */
2571static void igmp6_join_group(struct ifmcaddr6 *ma)
2572{
2573 unsigned long delay;
2574
2575 if (ma->mca_flags & MAF_NOREPORT)
2576 return;
2577
2578 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2579
2580 delay = prandom_u32() % unsolicited_report_interval(ma->idev);
2581
2582 if (cancel_delayed_work(&ma->mca_work)) {
2583 refcount_dec(&ma->mca_refcnt);
2584 delay = ma->mca_work.timer.expires - jiffies;
2585 }
2586
2587 if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
2588 refcount_inc(&ma->mca_refcnt);
2589 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2590}
2591
2592static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2593 struct inet6_dev *idev)
2594{
2595 struct ip6_sf_socklist *psl;
2596 int err;
2597
2598 psl = sock_dereference(iml->sflist, sk);
2599
2600 if (idev)
2601 mutex_lock(&idev->mc_lock);
2602
2603 if (!psl) {
2604 /* any-source empty exclude case */
2605 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2606 } else {
2607 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2608 psl->sl_count, psl->sl_addr, 0);
2609 RCU_INIT_POINTER(iml->sflist, NULL);
2610 atomic_sub(IP6_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2611 kfree_rcu(psl, rcu);
2612 }
2613
2614 if (idev)
2615 mutex_unlock(&idev->mc_lock);
2616
2617 return err;
2618}
2619
2620/* called with mc_lock */
2621static void igmp6_leave_group(struct ifmcaddr6 *ma)
2622{
2623 if (mld_in_v1_mode(ma->idev)) {
2624 if (ma->mca_flags & MAF_LAST_REPORTER) {
2625 igmp6_send(&ma->mca_addr, ma->idev->dev,
2626 ICMPV6_MGM_REDUCTION);
2627 }
2628 } else {
2629 mld_add_delrec(ma->idev, ma);
2630 mld_ifc_event(ma->idev);
2631 }
2632}
2633
2634static void mld_gq_work(struct work_struct *work)
2635{
2636 struct inet6_dev *idev = container_of(to_delayed_work(work),
2637 struct inet6_dev,
2638 mc_gq_work);
2639
2640 mutex_lock(&idev->mc_lock);
2641 mld_send_report(idev, NULL);
2642 idev->mc_gq_running = 0;
2643 mutex_unlock(&idev->mc_lock);
2644
2645 in6_dev_put(idev);
2646}
2647
2648static void mld_ifc_work(struct work_struct *work)
2649{
2650 struct inet6_dev *idev = container_of(to_delayed_work(work),
2651 struct inet6_dev,
2652 mc_ifc_work);
2653
2654 mutex_lock(&idev->mc_lock);
2655 mld_send_cr(idev);
2656
2657 if (idev->mc_ifc_count) {
2658 idev->mc_ifc_count--;
2659 if (idev->mc_ifc_count)
2660 mld_ifc_start_work(idev,
2661 unsolicited_report_interval(idev));
2662 }
2663 mutex_unlock(&idev->mc_lock);
2664 in6_dev_put(idev);
2665}
2666
2667/* called with mc_lock */
2668static void mld_ifc_event(struct inet6_dev *idev)
2669{
2670 if (mld_in_v1_mode(idev))
2671 return;
2672
2673 idev->mc_ifc_count = idev->mc_qrv;
2674 mld_ifc_start_work(idev, 1);
2675}
2676
2677static void mld_mca_work(struct work_struct *work)
2678{
2679 struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
2680 struct ifmcaddr6, mca_work);
2681
2682 mutex_lock(&ma->idev->mc_lock);
2683 if (mld_in_v1_mode(ma->idev))
2684 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2685 else
2686 mld_send_report(ma->idev, ma);
2687 ma->mca_flags |= MAF_LAST_REPORTER;
2688 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2689 mutex_unlock(&ma->idev->mc_lock);
2690
2691 ma_put(ma);
2692}
2693
2694/* Device changing type */
2695
2696void ipv6_mc_unmap(struct inet6_dev *idev)
2697{
2698 struct ifmcaddr6 *i;
2699
2700 /* Install multicast list, except for all-nodes (already installed) */
2701
2702 mutex_lock(&idev->mc_lock);
2703 for_each_mc_mclock(idev, i)
2704 igmp6_group_dropped(i);
2705 mutex_unlock(&idev->mc_lock);
2706}
2707
2708void ipv6_mc_remap(struct inet6_dev *idev)
2709{
2710 ipv6_mc_up(idev);
2711}
2712
2713/* Device going down */
2714void ipv6_mc_down(struct inet6_dev *idev)
2715{
2716 struct ifmcaddr6 *i;
2717
2718 mutex_lock(&idev->mc_lock);
2719 /* Withdraw multicast list */
2720 for_each_mc_mclock(idev, i)
2721 igmp6_group_dropped(i);
2722 mutex_unlock(&idev->mc_lock);
2723
2724 /* Should stop work after group drop. or we will
2725 * start work again in mld_ifc_event()
2726 */
2727 synchronize_net();
2728 mld_query_stop_work(idev);
2729 mld_report_stop_work(idev);
2730 mld_ifc_stop_work(idev);
2731 mld_gq_stop_work(idev);
2732 mld_dad_stop_work(idev);
2733}
2734
2735static void ipv6_mc_reset(struct inet6_dev *idev)
2736{
2737 idev->mc_qrv = sysctl_mld_qrv;
2738 idev->mc_qi = MLD_QI_DEFAULT;
2739 idev->mc_qri = MLD_QRI_DEFAULT;
2740 idev->mc_v1_seen = 0;
2741 idev->mc_maxdelay = unsolicited_report_interval(idev);
2742}
2743
2744/* Device going up */
2745
2746void ipv6_mc_up(struct inet6_dev *idev)
2747{
2748 struct ifmcaddr6 *i;
2749
2750 /* Install multicast list, except for all-nodes (already installed) */
2751
2752 ipv6_mc_reset(idev);
2753 mutex_lock(&idev->mc_lock);
2754 for_each_mc_mclock(idev, i) {
2755 mld_del_delrec(idev, i);
2756 igmp6_group_added(i);
2757 }
2758 mutex_unlock(&idev->mc_lock);
2759}
2760
2761/* IPv6 device initialization. */
2762
2763void ipv6_mc_init_dev(struct inet6_dev *idev)
2764{
2765 idev->mc_gq_running = 0;
2766 INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2767 RCU_INIT_POINTER(idev->mc_tomb, NULL);
2768 idev->mc_ifc_count = 0;
2769 INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2770 INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2771 INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2772 INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2773 skb_queue_head_init(&idev->mc_query_queue);
2774 skb_queue_head_init(&idev->mc_report_queue);
2775 spin_lock_init(&idev->mc_query_lock);
2776 spin_lock_init(&idev->mc_report_lock);
2777 mutex_init(&idev->mc_lock);
2778 ipv6_mc_reset(idev);
2779}
2780
2781/*
2782 * Device is about to be destroyed: clean up.
2783 */
2784
2785void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2786{
2787 struct ifmcaddr6 *i;
2788
2789 /* Deactivate works */
2790 ipv6_mc_down(idev);
2791 mutex_lock(&idev->mc_lock);
2792 mld_clear_delrec(idev);
2793 mutex_unlock(&idev->mc_lock);
2794 mld_clear_query(idev);
2795 mld_clear_report(idev);
2796
2797 /* Delete all-nodes address. */
2798 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2799 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2800 * fail.
2801 */
2802 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2803
2804 if (idev->cnf.forwarding)
2805 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2806
2807 mutex_lock(&idev->mc_lock);
2808 while ((i = mc_dereference(idev->mc_list, idev))) {
2809 rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2810
2811 ip6_mc_clear_src(i);
2812 ma_put(i);
2813 }
2814 mutex_unlock(&idev->mc_lock);
2815}
2816
2817static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2818{
2819 struct ifmcaddr6 *pmc;
2820
2821 ASSERT_RTNL();
2822
2823 mutex_lock(&idev->mc_lock);
2824 if (mld_in_v1_mode(idev)) {
2825 for_each_mc_mclock(idev, pmc)
2826 igmp6_join_group(pmc);
2827 } else {
2828 mld_send_report(idev, NULL);
2829 }
2830 mutex_unlock(&idev->mc_lock);
2831}
2832
2833static int ipv6_mc_netdev_event(struct notifier_block *this,
2834 unsigned long event,
2835 void *ptr)
2836{
2837 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2838 struct inet6_dev *idev = __in6_dev_get(dev);
2839
2840 switch (event) {
2841 case NETDEV_RESEND_IGMP:
2842 if (idev)
2843 ipv6_mc_rejoin_groups(idev);
2844 break;
2845 default:
2846 break;
2847 }
2848
2849 return NOTIFY_DONE;
2850}
2851
2852static struct notifier_block igmp6_netdev_notifier = {
2853 .notifier_call = ipv6_mc_netdev_event,
2854};
2855
2856#ifdef CONFIG_PROC_FS
2857struct igmp6_mc_iter_state {
2858 struct seq_net_private p;
2859 struct net_device *dev;
2860 struct inet6_dev *idev;
2861};
2862
2863#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2864
2865static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2866{
2867 struct ifmcaddr6 *im = NULL;
2868 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2869 struct net *net = seq_file_net(seq);
2870
2871 state->idev = NULL;
2872 for_each_netdev_rcu(net, state->dev) {
2873 struct inet6_dev *idev;
2874 idev = __in6_dev_get(state->dev);
2875 if (!idev)
2876 continue;
2877
2878 im = rcu_dereference(idev->mc_list);
2879 if (im) {
2880 state->idev = idev;
2881 break;
2882 }
2883 }
2884 return im;
2885}
2886
2887static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2888{
2889 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2890
2891 im = rcu_dereference(im->next);
2892 while (!im) {
2893 state->dev = next_net_device_rcu(state->dev);
2894 if (!state->dev) {
2895 state->idev = NULL;
2896 break;
2897 }
2898 state->idev = __in6_dev_get(state->dev);
2899 if (!state->idev)
2900 continue;
2901 im = rcu_dereference(state->idev->mc_list);
2902 }
2903 return im;
2904}
2905
2906static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2907{
2908 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2909 if (im)
2910 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2911 --pos;
2912 return pos ? NULL : im;
2913}
2914
2915static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2916 __acquires(RCU)
2917{
2918 rcu_read_lock();
2919 return igmp6_mc_get_idx(seq, *pos);
2920}
2921
2922static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2923{
2924 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2925
2926 ++*pos;
2927 return im;
2928}
2929
2930static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2931 __releases(RCU)
2932{
2933 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2934
2935 if (likely(state->idev))
2936 state->idev = NULL;
2937 state->dev = NULL;
2938 rcu_read_unlock();
2939}
2940
2941static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2942{
2943 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2944 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2945
2946 seq_printf(seq,
2947 "%-4d %-15s %pi6 %5d %08X %ld\n",
2948 state->dev->ifindex, state->dev->name,
2949 &im->mca_addr,
2950 im->mca_users, im->mca_flags,
2951 (im->mca_flags & MAF_TIMER_RUNNING) ?
2952 jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
2953 return 0;
2954}
2955
2956static const struct seq_operations igmp6_mc_seq_ops = {
2957 .start = igmp6_mc_seq_start,
2958 .next = igmp6_mc_seq_next,
2959 .stop = igmp6_mc_seq_stop,
2960 .show = igmp6_mc_seq_show,
2961};
2962
2963struct igmp6_mcf_iter_state {
2964 struct seq_net_private p;
2965 struct net_device *dev;
2966 struct inet6_dev *idev;
2967 struct ifmcaddr6 *im;
2968};
2969
2970#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2971
2972static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2973{
2974 struct ip6_sf_list *psf = NULL;
2975 struct ifmcaddr6 *im = NULL;
2976 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2977 struct net *net = seq_file_net(seq);
2978
2979 state->idev = NULL;
2980 state->im = NULL;
2981 for_each_netdev_rcu(net, state->dev) {
2982 struct inet6_dev *idev;
2983 idev = __in6_dev_get(state->dev);
2984 if (unlikely(idev == NULL))
2985 continue;
2986
2987 im = rcu_dereference(idev->mc_list);
2988 if (likely(im)) {
2989 psf = rcu_dereference(im->mca_sources);
2990 if (likely(psf)) {
2991 state->im = im;
2992 state->idev = idev;
2993 break;
2994 }
2995 }
2996 }
2997 return psf;
2998}
2999
3000static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
3001{
3002 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3003
3004 psf = rcu_dereference(psf->sf_next);
3005 while (!psf) {
3006 state->im = rcu_dereference(state->im->next);
3007 while (!state->im) {
3008 state->dev = next_net_device_rcu(state->dev);
3009 if (!state->dev) {
3010 state->idev = NULL;
3011 goto out;
3012 }
3013 state->idev = __in6_dev_get(state->dev);
3014 if (!state->idev)
3015 continue;
3016 state->im = rcu_dereference(state->idev->mc_list);
3017 }
3018 if (!state->im)
3019 break;
3020 psf = rcu_dereference(state->im->mca_sources);
3021 }
3022out:
3023 return psf;
3024}
3025
3026static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
3027{
3028 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
3029 if (psf)
3030 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
3031 --pos;
3032 return pos ? NULL : psf;
3033}
3034
3035static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
3036 __acquires(RCU)
3037{
3038 rcu_read_lock();
3039 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
3040}
3041
3042static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3043{
3044 struct ip6_sf_list *psf;
3045 if (v == SEQ_START_TOKEN)
3046 psf = igmp6_mcf_get_first(seq);
3047 else
3048 psf = igmp6_mcf_get_next(seq, v);
3049 ++*pos;
3050 return psf;
3051}
3052
3053static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
3054 __releases(RCU)
3055{
3056 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3057
3058 if (likely(state->im))
3059 state->im = NULL;
3060 if (likely(state->idev))
3061 state->idev = NULL;
3062
3063 state->dev = NULL;
3064 rcu_read_unlock();
3065}
3066
3067static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
3068{
3069 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
3070 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3071
3072 if (v == SEQ_START_TOKEN) {
3073 seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
3074 } else {
3075 seq_printf(seq,
3076 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
3077 state->dev->ifindex, state->dev->name,
3078 &state->im->mca_addr,
3079 &psf->sf_addr,
3080 psf->sf_count[MCAST_INCLUDE],
3081 psf->sf_count[MCAST_EXCLUDE]);
3082 }
3083 return 0;
3084}
3085
3086static const struct seq_operations igmp6_mcf_seq_ops = {
3087 .start = igmp6_mcf_seq_start,
3088 .next = igmp6_mcf_seq_next,
3089 .stop = igmp6_mcf_seq_stop,
3090 .show = igmp6_mcf_seq_show,
3091};
3092
3093static int __net_init igmp6_proc_init(struct net *net)
3094{
3095 int err;
3096
3097 err = -ENOMEM;
3098 if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
3099 sizeof(struct igmp6_mc_iter_state)))
3100 goto out;
3101 if (!proc_create_net("mcfilter6", 0444, net->proc_net,
3102 &igmp6_mcf_seq_ops,
3103 sizeof(struct igmp6_mcf_iter_state)))
3104 goto out_proc_net_igmp6;
3105
3106 err = 0;
3107out:
3108 return err;
3109
3110out_proc_net_igmp6:
3111 remove_proc_entry("igmp6", net->proc_net);
3112 goto out;
3113}
3114
3115static void __net_exit igmp6_proc_exit(struct net *net)
3116{
3117 remove_proc_entry("mcfilter6", net->proc_net);
3118 remove_proc_entry("igmp6", net->proc_net);
3119}
3120#else
3121static inline int igmp6_proc_init(struct net *net)
3122{
3123 return 0;
3124}
3125static inline void igmp6_proc_exit(struct net *net)
3126{
3127}
3128#endif
3129
3130static int __net_init igmp6_net_init(struct net *net)
3131{
3132 int err;
3133
3134 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
3135 SOCK_RAW, IPPROTO_ICMPV6, net);
3136 if (err < 0) {
3137 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
3138 err);
3139 goto out;
3140 }
3141
3142 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
3143 net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;
3144
3145 err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
3146 SOCK_RAW, IPPROTO_ICMPV6, net);
3147 if (err < 0) {
3148 pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
3149 err);
3150 goto out_sock_create;
3151 }
3152
3153 err = igmp6_proc_init(net);
3154 if (err)
3155 goto out_sock_create_autojoin;
3156
3157 return 0;
3158
3159out_sock_create_autojoin:
3160 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3161out_sock_create:
3162 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3163out:
3164 return err;
3165}
3166
3167static void __net_exit igmp6_net_exit(struct net *net)
3168{
3169 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3170 inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3171 igmp6_proc_exit(net);
3172}
3173
3174static struct pernet_operations igmp6_net_ops = {
3175 .init = igmp6_net_init,
3176 .exit = igmp6_net_exit,
3177};
3178
3179int __init igmp6_init(void)
3180{
3181 int err;
3182
3183 err = register_pernet_subsys(&igmp6_net_ops);
3184 if (err)
3185 return err;
3186
3187 mld_wq = create_workqueue("mld");
3188 if (!mld_wq) {
3189 unregister_pernet_subsys(&igmp6_net_ops);
3190 return -ENOMEM;
3191 }
3192
3193 return err;
3194}
3195
3196int __init igmp6_late_init(void)
3197{
3198 return register_netdevice_notifier(&igmp6_netdev_notifier);
3199}
3200
3201void igmp6_cleanup(void)
3202{
3203 unregister_pernet_subsys(&igmp6_net_ops);
3204 destroy_workqueue(mld_wq);
3205}
3206
3207void igmp6_late_cleanup(void)
3208{
3209 unregister_netdevice_notifier(&igmp6_netdev_notifier);
3210}
1/*
2 * Multicast support for IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16/* Changes:
17 *
18 * yoshfuji : fix format of router-alert option
19 * YOSHIFUJI Hideaki @USAGI:
20 * Fixed source address for MLD message based on
21 * <draft-ietf-magma-mld-source-05.txt>.
22 * YOSHIFUJI Hideaki @USAGI:
23 * - Ignore Queries for invalid addresses.
24 * - MLD for link-local addresses.
25 * David L Stevens <dlstevens@us.ibm.com>:
26 * - MLDv2 support
27 */
28
29#include <linux/module.h>
30#include <linux/errno.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/socket.h>
34#include <linux/sockios.h>
35#include <linux/jiffies.h>
36#include <linux/times.h>
37#include <linux/net.h>
38#include <linux/in.h>
39#include <linux/in6.h>
40#include <linux/netdevice.h>
41#include <linux/if_arp.h>
42#include <linux/route.h>
43#include <linux/init.h>
44#include <linux/proc_fs.h>
45#include <linux/seq_file.h>
46#include <linux/slab.h>
47#include <net/mld.h>
48
49#include <linux/netfilter.h>
50#include <linux/netfilter_ipv6.h>
51
52#include <net/net_namespace.h>
53#include <net/sock.h>
54#include <net/snmp.h>
55
56#include <net/ipv6.h>
57#include <net/protocol.h>
58#include <net/if_inet6.h>
59#include <net/ndisc.h>
60#include <net/addrconf.h>
61#include <net/ip6_route.h>
62#include <net/inet_common.h>
63
64#include <net/ip6_checksum.h>
65
66/* Set to 3 to get tracing... */
67#define MCAST_DEBUG 2
68
69#if MCAST_DEBUG >= 3
70#define MDBG(x) printk x
71#else
72#define MDBG(x)
73#endif
74
75/* Ensure that we have struct in6_addr aligned on 32bit word. */
76static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
77 BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
78 BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
79 BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
80};
81
82static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
83
84/* Big mc list lock for all the sockets */
85static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
86
87static void igmp6_join_group(struct ifmcaddr6 *ma);
88static void igmp6_leave_group(struct ifmcaddr6 *ma);
89static void igmp6_timer_handler(unsigned long data);
90
91static void mld_gq_timer_expire(unsigned long data);
92static void mld_ifc_timer_expire(unsigned long data);
93static void mld_ifc_event(struct inet6_dev *idev);
94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
95static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
96static void mld_clear_delrec(struct inet6_dev *idev);
97static int sf_setstate(struct ifmcaddr6 *pmc);
98static void sf_markstate(struct ifmcaddr6 *pmc);
99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
100static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
101 int sfmode, int sfcount, const struct in6_addr *psfsrc,
102 int delta);
103static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
104 int sfmode, int sfcount, const struct in6_addr *psfsrc,
105 int delta);
106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
107 struct inet6_dev *idev);
108
109
110#define IGMP6_UNSOLICITED_IVAL (10*HZ)
111#define MLD_QRV_DEFAULT 2
112
113#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
114 (idev)->cnf.force_mld_version == 1 || \
115 ((idev)->mc_v1_seen && \
116 time_before(jiffies, (idev)->mc_v1_seen)))
117
118#define IPV6_MLD_MAX_MSF 64
119
120int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
121
122/*
123 * socket join on multicast group
124 */
125
126#define for_each_pmc_rcu(np, pmc) \
127 for (pmc = rcu_dereference(np->ipv6_mc_list); \
128 pmc != NULL; \
129 pmc = rcu_dereference(pmc->next))
130
131int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
132{
133 struct net_device *dev = NULL;
134 struct ipv6_mc_socklist *mc_lst;
135 struct ipv6_pinfo *np = inet6_sk(sk);
136 struct net *net = sock_net(sk);
137 int err;
138
139 if (!ipv6_addr_is_multicast(addr))
140 return -EINVAL;
141
142 rcu_read_lock();
143 for_each_pmc_rcu(np, mc_lst) {
144 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
145 ipv6_addr_equal(&mc_lst->addr, addr)) {
146 rcu_read_unlock();
147 return -EADDRINUSE;
148 }
149 }
150 rcu_read_unlock();
151
152 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
153
154 if (mc_lst == NULL)
155 return -ENOMEM;
156
157 mc_lst->next = NULL;
158 mc_lst->addr = *addr;
159
160 rcu_read_lock();
161 if (ifindex == 0) {
162 struct rt6_info *rt;
163 rt = rt6_lookup(net, addr, NULL, 0, 0);
164 if (rt) {
165 dev = rt->dst.dev;
166 dst_release(&rt->dst);
167 }
168 } else
169 dev = dev_get_by_index_rcu(net, ifindex);
170
171 if (dev == NULL) {
172 rcu_read_unlock();
173 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
174 return -ENODEV;
175 }
176
177 mc_lst->ifindex = dev->ifindex;
178 mc_lst->sfmode = MCAST_EXCLUDE;
179 rwlock_init(&mc_lst->sflock);
180 mc_lst->sflist = NULL;
181
182 /*
183 * now add/increase the group membership on the device
184 */
185
186 err = ipv6_dev_mc_inc(dev, addr);
187
188 if (err) {
189 rcu_read_unlock();
190 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
191 return err;
192 }
193
194 spin_lock(&ipv6_sk_mc_lock);
195 mc_lst->next = np->ipv6_mc_list;
196 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
197 spin_unlock(&ipv6_sk_mc_lock);
198
199 rcu_read_unlock();
200
201 return 0;
202}
203
204/*
205 * socket leave on multicast group
206 */
207int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
208{
209 struct ipv6_pinfo *np = inet6_sk(sk);
210 struct ipv6_mc_socklist *mc_lst;
211 struct ipv6_mc_socklist __rcu **lnk;
212 struct net *net = sock_net(sk);
213
214 spin_lock(&ipv6_sk_mc_lock);
215 for (lnk = &np->ipv6_mc_list;
216 (mc_lst = rcu_dereference_protected(*lnk,
217 lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
218 lnk = &mc_lst->next) {
219 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
220 ipv6_addr_equal(&mc_lst->addr, addr)) {
221 struct net_device *dev;
222
223 *lnk = mc_lst->next;
224 spin_unlock(&ipv6_sk_mc_lock);
225
226 rcu_read_lock();
227 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
228 if (dev != NULL) {
229 struct inet6_dev *idev = __in6_dev_get(dev);
230
231 (void) ip6_mc_leave_src(sk, mc_lst, idev);
232 if (idev)
233 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
234 } else
235 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
236 rcu_read_unlock();
237 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
238 kfree_rcu(mc_lst, rcu);
239 return 0;
240 }
241 }
242 spin_unlock(&ipv6_sk_mc_lock);
243
244 return -EADDRNOTAVAIL;
245}
246
247/* called with rcu_read_lock() */
248static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
249 const struct in6_addr *group,
250 int ifindex)
251{
252 struct net_device *dev = NULL;
253 struct inet6_dev *idev = NULL;
254
255 if (ifindex == 0) {
256 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
257
258 if (rt) {
259 dev = rt->dst.dev;
260 dst_release(&rt->dst);
261 }
262 } else
263 dev = dev_get_by_index_rcu(net, ifindex);
264
265 if (!dev)
266 return NULL;
267 idev = __in6_dev_get(dev);
268 if (!idev)
269 return NULL;
270 read_lock_bh(&idev->lock);
271 if (idev->dead) {
272 read_unlock_bh(&idev->lock);
273 return NULL;
274 }
275 return idev;
276}
277
278void ipv6_sock_mc_close(struct sock *sk)
279{
280 struct ipv6_pinfo *np = inet6_sk(sk);
281 struct ipv6_mc_socklist *mc_lst;
282 struct net *net = sock_net(sk);
283
284 spin_lock(&ipv6_sk_mc_lock);
285 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
286 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
287 struct net_device *dev;
288
289 np->ipv6_mc_list = mc_lst->next;
290 spin_unlock(&ipv6_sk_mc_lock);
291
292 rcu_read_lock();
293 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
294 if (dev) {
295 struct inet6_dev *idev = __in6_dev_get(dev);
296
297 (void) ip6_mc_leave_src(sk, mc_lst, idev);
298 if (idev)
299 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
300 } else
301 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
302 rcu_read_unlock();
303
304 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
305 kfree_rcu(mc_lst, rcu);
306
307 spin_lock(&ipv6_sk_mc_lock);
308 }
309 spin_unlock(&ipv6_sk_mc_lock);
310}
311
312int ip6_mc_source(int add, int omode, struct sock *sk,
313 struct group_source_req *pgsr)
314{
315 struct in6_addr *source, *group;
316 struct ipv6_mc_socklist *pmc;
317 struct inet6_dev *idev;
318 struct ipv6_pinfo *inet6 = inet6_sk(sk);
319 struct ip6_sf_socklist *psl;
320 struct net *net = sock_net(sk);
321 int i, j, rv;
322 int leavegroup = 0;
323 int pmclocked = 0;
324 int err;
325
326 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
327 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
328
329 if (!ipv6_addr_is_multicast(group))
330 return -EINVAL;
331
332 rcu_read_lock();
333 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
334 if (!idev) {
335 rcu_read_unlock();
336 return -ENODEV;
337 }
338
339 err = -EADDRNOTAVAIL;
340
341 for_each_pmc_rcu(inet6, pmc) {
342 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
343 continue;
344 if (ipv6_addr_equal(&pmc->addr, group))
345 break;
346 }
347 if (!pmc) { /* must have a prior join */
348 err = -EINVAL;
349 goto done;
350 }
351 /* if a source filter was set, must be the same mode as before */
352 if (pmc->sflist) {
353 if (pmc->sfmode != omode) {
354 err = -EINVAL;
355 goto done;
356 }
357 } else if (pmc->sfmode != omode) {
358 /* allow mode switches for empty-set filters */
359 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
360 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
361 pmc->sfmode = omode;
362 }
363
364 write_lock(&pmc->sflock);
365 pmclocked = 1;
366
367 psl = pmc->sflist;
368 if (!add) {
369 if (!psl)
370 goto done; /* err = -EADDRNOTAVAIL */
371 rv = !0;
372 for (i=0; i<psl->sl_count; i++) {
373 rv = memcmp(&psl->sl_addr[i], source,
374 sizeof(struct in6_addr));
375 if (rv == 0)
376 break;
377 }
378 if (rv) /* source not found */
379 goto done; /* err = -EADDRNOTAVAIL */
380
381 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
382 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
383 leavegroup = 1;
384 goto done;
385 }
386
387 /* update the interface filter */
388 ip6_mc_del_src(idev, group, omode, 1, source, 1);
389
390 for (j=i+1; j<psl->sl_count; j++)
391 psl->sl_addr[j-1] = psl->sl_addr[j];
392 psl->sl_count--;
393 err = 0;
394 goto done;
395 }
396 /* else, add a new source to the filter */
397
398 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
399 err = -ENOBUFS;
400 goto done;
401 }
402 if (!psl || psl->sl_count == psl->sl_max) {
403 struct ip6_sf_socklist *newpsl;
404 int count = IP6_SFBLOCK;
405
406 if (psl)
407 count += psl->sl_max;
408 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
409 if (!newpsl) {
410 err = -ENOBUFS;
411 goto done;
412 }
413 newpsl->sl_max = count;
414 newpsl->sl_count = count - IP6_SFBLOCK;
415 if (psl) {
416 for (i=0; i<psl->sl_count; i++)
417 newpsl->sl_addr[i] = psl->sl_addr[i];
418 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
419 }
420 pmc->sflist = psl = newpsl;
421 }
422 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
423 for (i=0; i<psl->sl_count; i++) {
424 rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr));
425 if (rv == 0)
426 break;
427 }
428 if (rv == 0) /* address already there is an error */
429 goto done;
430 for (j=psl->sl_count-1; j>=i; j--)
431 psl->sl_addr[j+1] = psl->sl_addr[j];
432 psl->sl_addr[i] = *source;
433 psl->sl_count++;
434 err = 0;
435 /* update the interface list */
436 ip6_mc_add_src(idev, group, omode, 1, source, 1);
437done:
438 if (pmclocked)
439 write_unlock(&pmc->sflock);
440 read_unlock_bh(&idev->lock);
441 rcu_read_unlock();
442 if (leavegroup)
443 return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
444 return err;
445}
446
447int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
448{
449 const struct in6_addr *group;
450 struct ipv6_mc_socklist *pmc;
451 struct inet6_dev *idev;
452 struct ipv6_pinfo *inet6 = inet6_sk(sk);
453 struct ip6_sf_socklist *newpsl, *psl;
454 struct net *net = sock_net(sk);
455 int leavegroup = 0;
456 int i, err;
457
458 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
459
460 if (!ipv6_addr_is_multicast(group))
461 return -EINVAL;
462 if (gsf->gf_fmode != MCAST_INCLUDE &&
463 gsf->gf_fmode != MCAST_EXCLUDE)
464 return -EINVAL;
465
466 rcu_read_lock();
467 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
468
469 if (!idev) {
470 rcu_read_unlock();
471 return -ENODEV;
472 }
473
474 err = 0;
475
476 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
477 leavegroup = 1;
478 goto done;
479 }
480
481 for_each_pmc_rcu(inet6, pmc) {
482 if (pmc->ifindex != gsf->gf_interface)
483 continue;
484 if (ipv6_addr_equal(&pmc->addr, group))
485 break;
486 }
487 if (!pmc) { /* must have a prior join */
488 err = -EINVAL;
489 goto done;
490 }
491 if (gsf->gf_numsrc) {
492 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
493 GFP_ATOMIC);
494 if (!newpsl) {
495 err = -ENOBUFS;
496 goto done;
497 }
498 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
499 for (i=0; i<newpsl->sl_count; ++i) {
500 struct sockaddr_in6 *psin6;
501
502 psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
503 newpsl->sl_addr[i] = psin6->sin6_addr;
504 }
505 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
506 newpsl->sl_count, newpsl->sl_addr, 0);
507 if (err) {
508 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
509 goto done;
510 }
511 } else {
512 newpsl = NULL;
513 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
514 }
515
516 write_lock(&pmc->sflock);
517 psl = pmc->sflist;
518 if (psl) {
519 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
520 psl->sl_count, psl->sl_addr, 0);
521 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
522 } else
523 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
524 pmc->sflist = newpsl;
525 pmc->sfmode = gsf->gf_fmode;
526 write_unlock(&pmc->sflock);
527 err = 0;
528done:
529 read_unlock_bh(&idev->lock);
530 rcu_read_unlock();
531 if (leavegroup)
532 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
533 return err;
534}
535
536int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
537 struct group_filter __user *optval, int __user *optlen)
538{
539 int err, i, count, copycount;
540 const struct in6_addr *group;
541 struct ipv6_mc_socklist *pmc;
542 struct inet6_dev *idev;
543 struct ipv6_pinfo *inet6 = inet6_sk(sk);
544 struct ip6_sf_socklist *psl;
545 struct net *net = sock_net(sk);
546
547 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
548
549 if (!ipv6_addr_is_multicast(group))
550 return -EINVAL;
551
552 rcu_read_lock();
553 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
554
555 if (!idev) {
556 rcu_read_unlock();
557 return -ENODEV;
558 }
559
560 err = -EADDRNOTAVAIL;
561 /*
562 * changes to the ipv6_mc_list require the socket lock and
563 * a read lock on ip6_sk_mc_lock. We have the socket lock,
564 * so reading the list is safe.
565 */
566
567 for_each_pmc_rcu(inet6, pmc) {
568 if (pmc->ifindex != gsf->gf_interface)
569 continue;
570 if (ipv6_addr_equal(group, &pmc->addr))
571 break;
572 }
573 if (!pmc) /* must have a prior join */
574 goto done;
575 gsf->gf_fmode = pmc->sfmode;
576 psl = pmc->sflist;
577 count = psl ? psl->sl_count : 0;
578 read_unlock_bh(&idev->lock);
579 rcu_read_unlock();
580
581 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
582 gsf->gf_numsrc = count;
583 if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
584 copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
585 return -EFAULT;
586 }
587 /* changes to psl require the socket lock, a read lock on
588 * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We
589 * have the socket lock, so reading here is safe.
590 */
591 for (i=0; i<copycount; i++) {
592 struct sockaddr_in6 *psin6;
593 struct sockaddr_storage ss;
594
595 psin6 = (struct sockaddr_in6 *)&ss;
596 memset(&ss, 0, sizeof(ss));
597 psin6->sin6_family = AF_INET6;
598 psin6->sin6_addr = psl->sl_addr[i];
599 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
600 return -EFAULT;
601 }
602 return 0;
603done:
604 read_unlock_bh(&idev->lock);
605 rcu_read_unlock();
606 return err;
607}
608
609bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
610 const struct in6_addr *src_addr)
611{
612 struct ipv6_pinfo *np = inet6_sk(sk);
613 struct ipv6_mc_socklist *mc;
614 struct ip6_sf_socklist *psl;
615 bool rv = true;
616
617 rcu_read_lock();
618 for_each_pmc_rcu(np, mc) {
619 if (ipv6_addr_equal(&mc->addr, mc_addr))
620 break;
621 }
622 if (!mc) {
623 rcu_read_unlock();
624 return true;
625 }
626 read_lock(&mc->sflock);
627 psl = mc->sflist;
628 if (!psl) {
629 rv = mc->sfmode == MCAST_EXCLUDE;
630 } else {
631 int i;
632
633 for (i=0; i<psl->sl_count; i++) {
634 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
635 break;
636 }
637 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
638 rv = false;
639 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
640 rv = false;
641 }
642 read_unlock(&mc->sflock);
643 rcu_read_unlock();
644
645 return rv;
646}
647
648static void ma_put(struct ifmcaddr6 *mc)
649{
650 if (atomic_dec_and_test(&mc->mca_refcnt)) {
651 in6_dev_put(mc->idev);
652 kfree(mc);
653 }
654}
655
656static void igmp6_group_added(struct ifmcaddr6 *mc)
657{
658 struct net_device *dev = mc->idev->dev;
659 char buf[MAX_ADDR_LEN];
660
661 spin_lock_bh(&mc->mca_lock);
662 if (!(mc->mca_flags&MAF_LOADED)) {
663 mc->mca_flags |= MAF_LOADED;
664 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
665 dev_mc_add(dev, buf);
666 }
667 spin_unlock_bh(&mc->mca_lock);
668
669 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
670 return;
671
672 if (MLD_V1_SEEN(mc->idev)) {
673 igmp6_join_group(mc);
674 return;
675 }
676 /* else v2 */
677
678 mc->mca_crcount = mc->idev->mc_qrv;
679 mld_ifc_event(mc->idev);
680}
681
682static void igmp6_group_dropped(struct ifmcaddr6 *mc)
683{
684 struct net_device *dev = mc->idev->dev;
685 char buf[MAX_ADDR_LEN];
686
687 spin_lock_bh(&mc->mca_lock);
688 if (mc->mca_flags&MAF_LOADED) {
689 mc->mca_flags &= ~MAF_LOADED;
690 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
691 dev_mc_del(dev, buf);
692 }
693
694 if (mc->mca_flags & MAF_NOREPORT)
695 goto done;
696 spin_unlock_bh(&mc->mca_lock);
697
698 if (!mc->idev->dead)
699 igmp6_leave_group(mc);
700
701 spin_lock_bh(&mc->mca_lock);
702 if (del_timer(&mc->mca_timer))
703 atomic_dec(&mc->mca_refcnt);
704done:
705 ip6_mc_clear_src(mc);
706 spin_unlock_bh(&mc->mca_lock);
707}
708
709/*
710 * deleted ifmcaddr6 manipulation
711 */
712static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
713{
714 struct ifmcaddr6 *pmc;
715
716 /* this is an "ifmcaddr6" for convenience; only the fields below
717 * are actually used. In particular, the refcnt and users are not
718 * used for management of the delete list. Using the same structure
719 * for deleted items allows change reports to use common code with
720 * non-deleted or query-response MCA's.
721 */
722 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
723 if (!pmc)
724 return;
725
726 spin_lock_bh(&im->mca_lock);
727 spin_lock_init(&pmc->mca_lock);
728 pmc->idev = im->idev;
729 in6_dev_hold(idev);
730 pmc->mca_addr = im->mca_addr;
731 pmc->mca_crcount = idev->mc_qrv;
732 pmc->mca_sfmode = im->mca_sfmode;
733 if (pmc->mca_sfmode == MCAST_INCLUDE) {
734 struct ip6_sf_list *psf;
735
736 pmc->mca_tomb = im->mca_tomb;
737 pmc->mca_sources = im->mca_sources;
738 im->mca_tomb = im->mca_sources = NULL;
739 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
740 psf->sf_crcount = pmc->mca_crcount;
741 }
742 spin_unlock_bh(&im->mca_lock);
743
744 spin_lock_bh(&idev->mc_lock);
745 pmc->next = idev->mc_tomb;
746 idev->mc_tomb = pmc;
747 spin_unlock_bh(&idev->mc_lock);
748}
749
750static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
751{
752 struct ifmcaddr6 *pmc, *pmc_prev;
753 struct ip6_sf_list *psf, *psf_next;
754
755 spin_lock_bh(&idev->mc_lock);
756 pmc_prev = NULL;
757 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
758 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
759 break;
760 pmc_prev = pmc;
761 }
762 if (pmc) {
763 if (pmc_prev)
764 pmc_prev->next = pmc->next;
765 else
766 idev->mc_tomb = pmc->next;
767 }
768 spin_unlock_bh(&idev->mc_lock);
769
770 if (pmc) {
771 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
772 psf_next = psf->sf_next;
773 kfree(psf);
774 }
775 in6_dev_put(pmc->idev);
776 kfree(pmc);
777 }
778}
779
780static void mld_clear_delrec(struct inet6_dev *idev)
781{
782 struct ifmcaddr6 *pmc, *nextpmc;
783
784 spin_lock_bh(&idev->mc_lock);
785 pmc = idev->mc_tomb;
786 idev->mc_tomb = NULL;
787 spin_unlock_bh(&idev->mc_lock);
788
789 for (; pmc; pmc = nextpmc) {
790 nextpmc = pmc->next;
791 ip6_mc_clear_src(pmc);
792 in6_dev_put(pmc->idev);
793 kfree(pmc);
794 }
795
796 /* clear dead sources, too */
797 read_lock_bh(&idev->lock);
798 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
799 struct ip6_sf_list *psf, *psf_next;
800
801 spin_lock_bh(&pmc->mca_lock);
802 psf = pmc->mca_tomb;
803 pmc->mca_tomb = NULL;
804 spin_unlock_bh(&pmc->mca_lock);
805 for (; psf; psf=psf_next) {
806 psf_next = psf->sf_next;
807 kfree(psf);
808 }
809 }
810 read_unlock_bh(&idev->lock);
811}
812
813
814/*
815 * device multicast group inc (add if not found)
816 */
817int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
818{
819 struct ifmcaddr6 *mc;
820 struct inet6_dev *idev;
821
822 /* we need to take a reference on idev */
823 idev = in6_dev_get(dev);
824
825 if (idev == NULL)
826 return -EINVAL;
827
828 write_lock_bh(&idev->lock);
829 if (idev->dead) {
830 write_unlock_bh(&idev->lock);
831 in6_dev_put(idev);
832 return -ENODEV;
833 }
834
835 for (mc = idev->mc_list; mc; mc = mc->next) {
836 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
837 mc->mca_users++;
838 write_unlock_bh(&idev->lock);
839 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
840 NULL, 0);
841 in6_dev_put(idev);
842 return 0;
843 }
844 }
845
846 /*
847 * not found: create a new one.
848 */
849
850 mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
851
852 if (mc == NULL) {
853 write_unlock_bh(&idev->lock);
854 in6_dev_put(idev);
855 return -ENOMEM;
856 }
857
858 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
859
860 mc->mca_addr = *addr;
861 mc->idev = idev; /* (reference taken) */
862 mc->mca_users = 1;
863 /* mca_stamp should be updated upon changes */
864 mc->mca_cstamp = mc->mca_tstamp = jiffies;
865 atomic_set(&mc->mca_refcnt, 2);
866 spin_lock_init(&mc->mca_lock);
867
868 /* initial mode is (EX, empty) */
869 mc->mca_sfmode = MCAST_EXCLUDE;
870 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
871
872 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
873 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
874 mc->mca_flags |= MAF_NOREPORT;
875
876 mc->next = idev->mc_list;
877 idev->mc_list = mc;
878 write_unlock_bh(&idev->lock);
879
880 mld_del_delrec(idev, &mc->mca_addr);
881 igmp6_group_added(mc);
882 ma_put(mc);
883 return 0;
884}
885
886/*
887 * device multicast group del
888 */
889int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
890{
891 struct ifmcaddr6 *ma, **map;
892
893 write_lock_bh(&idev->lock);
894 for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
895 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
896 if (--ma->mca_users == 0) {
897 *map = ma->next;
898 write_unlock_bh(&idev->lock);
899
900 igmp6_group_dropped(ma);
901
902 ma_put(ma);
903 return 0;
904 }
905 write_unlock_bh(&idev->lock);
906 return 0;
907 }
908 }
909 write_unlock_bh(&idev->lock);
910
911 return -ENOENT;
912}
913
914int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
915{
916 struct inet6_dev *idev;
917 int err;
918
919 rcu_read_lock();
920
921 idev = __in6_dev_get(dev);
922 if (!idev)
923 err = -ENODEV;
924 else
925 err = __ipv6_dev_mc_dec(idev, addr);
926
927 rcu_read_unlock();
928 return err;
929}
930
931/*
932 * identify MLD packets for MLD filter exceptions
933 */
934bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
935{
936 struct icmp6hdr *pic;
937
938 if (nexthdr != IPPROTO_ICMPV6)
939 return false;
940
941 if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
942 return false;
943
944 pic = icmp6_hdr(skb);
945
946 switch (pic->icmp6_type) {
947 case ICMPV6_MGM_QUERY:
948 case ICMPV6_MGM_REPORT:
949 case ICMPV6_MGM_REDUCTION:
950 case ICMPV6_MLD2_REPORT:
951 return true;
952 default:
953 break;
954 }
955 return false;
956}
957
958/*
959 * check if the interface/address pair is valid
960 */
961bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
962 const struct in6_addr *src_addr)
963{
964 struct inet6_dev *idev;
965 struct ifmcaddr6 *mc;
966 bool rv = false;
967
968 rcu_read_lock();
969 idev = __in6_dev_get(dev);
970 if (idev) {
971 read_lock_bh(&idev->lock);
972 for (mc = idev->mc_list; mc; mc=mc->next) {
973 if (ipv6_addr_equal(&mc->mca_addr, group))
974 break;
975 }
976 if (mc) {
977 if (src_addr && !ipv6_addr_any(src_addr)) {
978 struct ip6_sf_list *psf;
979
980 spin_lock_bh(&mc->mca_lock);
981 for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
982 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
983 break;
984 }
985 if (psf)
986 rv = psf->sf_count[MCAST_INCLUDE] ||
987 psf->sf_count[MCAST_EXCLUDE] !=
988 mc->mca_sfcount[MCAST_EXCLUDE];
989 else
990 rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
991 spin_unlock_bh(&mc->mca_lock);
992 } else
993 rv = true; /* don't filter unspecified source */
994 }
995 read_unlock_bh(&idev->lock);
996 }
997 rcu_read_unlock();
998 return rv;
999}
1000
1001static void mld_gq_start_timer(struct inet6_dev *idev)
1002{
1003 int tv = net_random() % idev->mc_maxdelay;
1004
1005 idev->mc_gq_running = 1;
1006 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1007 in6_dev_hold(idev);
1008}
1009
1010static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
1011{
1012 int tv = net_random() % delay;
1013
1014 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1015 in6_dev_hold(idev);
1016}
1017
1018/*
1019 * IGMP handling (alias multicast ICMPv6 messages)
1020 */
1021
1022static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1023{
1024 unsigned long delay = resptime;
1025
1026 /* Do not start timer for these addresses */
1027 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1028 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1029 return;
1030
1031 if (del_timer(&ma->mca_timer)) {
1032 atomic_dec(&ma->mca_refcnt);
1033 delay = ma->mca_timer.expires - jiffies;
1034 }
1035
1036 if (delay >= resptime) {
1037 if (resptime)
1038 delay = net_random() % resptime;
1039 else
1040 delay = 1;
1041 }
1042 ma->mca_timer.expires = jiffies + delay;
1043 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1044 atomic_inc(&ma->mca_refcnt);
1045 ma->mca_flags |= MAF_TIMER_RUNNING;
1046}
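
/* Worked example of the logic above: if a query carries a Maximum
 * Response Delay of 10 seconds (resptime == 10*HZ) and this group's timer
 * is already due in 3*HZ, the shorter pending timer is kept; otherwise a
 * fresh random delay below resptime jiffies is chosen, as MLD's report
 * suppression rules require.
 */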
1047
1048/* mark EXCLUDE-mode sources */
1049static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1050 const struct in6_addr *srcs)
1051{
1052 struct ip6_sf_list *psf;
1053 int i, scount;
1054
1055 scount = 0;
1056 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1057 if (scount == nsrcs)
1058 break;
1059 for (i=0; i<nsrcs; i++) {
1060 /* skip inactive filters */
1061 if (psf->sf_count[MCAST_INCLUDE] ||
1062 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1063 psf->sf_count[MCAST_EXCLUDE])
1064 break;
1065 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1066 scount++;
1067 break;
1068 }
1069 }
1070 }
1071 pmc->mca_flags &= ~MAF_GSQUERY;
1072 if (scount == nsrcs) /* all sources excluded */
1073 return false;
1074 return true;
1075}
1076
1077static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1078 const struct in6_addr *srcs)
1079{
1080 struct ip6_sf_list *psf;
1081 int i, scount;
1082
1083 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1084 return mld_xmarksources(pmc, nsrcs, srcs);
1085
1086 /* mark INCLUDE-mode sources */
1087
1088 scount = 0;
1089 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1090 if (scount == nsrcs)
1091 break;
1092 for (i=0; i<nsrcs; i++) {
1093 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1094 psf->sf_gsresp = 1;
1095 scount++;
1096 break;
1097 }
1098 }
1099 }
1100 if (!scount) {
1101 pmc->mca_flags &= ~MAF_GSQUERY;
1102 return false;
1103 }
1104 pmc->mca_flags |= MAF_GSQUERY;
1105 return true;
1106}
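
/* Worked example: a group in INCLUDE{S1, S2} that is asked about {S2, S3}
 * gets sf_gsresp set on S2, keeps MAF_GSQUERY and returns true, so only
 * the matching source is put in the later report.  If no queried source
 * is in the include list, MAF_GSQUERY is cleared and false is returned:
 * the query needs no response from this host.  In EXCLUDE mode
 * (mld_xmarksources() above) a response is suppressed only when every
 * queried source is excluded in all filters.
 */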
1107
1108/* called with rcu_read_lock() */
1109int igmp6_event_query(struct sk_buff *skb)
1110{
1111 struct mld2_query *mlh2 = NULL;
1112 struct ifmcaddr6 *ma;
1113 const struct in6_addr *group;
1114 unsigned long max_delay;
1115 struct inet6_dev *idev;
1116 struct mld_msg *mld;
1117 int group_type;
1118 int mark = 0;
1119 int len;
1120
1121 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1122 return -EINVAL;
1123
1124 /* length of the MLD message itself: IPv6 payload minus any extension headers */
1125 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1126 len -= skb_network_header_len(skb);
1127
1128 /* Drop queries whose source address is not link-local */
1129 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
1130 return -EINVAL;
1131
1132 idev = __in6_dev_get(skb->dev);
1133
1134 if (idev == NULL)
1135 return 0;
1136
1137 mld = (struct mld_msg *)icmp6_hdr(skb);
1138 group = &mld->mld_mca;
1139 group_type = ipv6_addr_type(group);
1140
1141 if (group_type != IPV6_ADDR_ANY &&
1142 !(group_type&IPV6_ADDR_MULTICAST))
1143 return -EINVAL;
1144
1145 if (len == MLD_V1_QUERY_LEN) {
1146 int switchback;
1147 /* MLDv1 router present */
1148
1149 /* Translate milliseconds to jiffies */
1150 max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
1151
1152 switchback = (idev->mc_qrv + 1) * max_delay;
1153 idev->mc_v1_seen = jiffies + switchback;
1154
1155 /* cancel the interface change timer */
1156 idev->mc_ifc_count = 0;
1157 if (del_timer(&idev->mc_ifc_timer))
1158 __in6_dev_put(idev);
1159 /* clear deleted report items */
1160 mld_clear_delrec(idev);
1161 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1162 int srcs_offset = sizeof(struct mld2_query) -
1163 sizeof(struct icmp6hdr);
1164 if (!pskb_may_pull(skb, srcs_offset))
1165 return -EINVAL;
1166
1167 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1168 max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
1169 if (!max_delay)
1170 max_delay = 1;
1171 idev->mc_maxdelay = max_delay;
1172 if (mlh2->mld2q_qrv)
1173 idev->mc_qrv = mlh2->mld2q_qrv;
1174 if (group_type == IPV6_ADDR_ANY) { /* general query */
1175 if (mlh2->mld2q_nsrcs)
1176 return -EINVAL; /* no sources allowed */
1177
1178 mld_gq_start_timer(idev);
1179 return 0;
1180 }
1181 /* mark sources to include, if group & source-specific */
1182 if (mlh2->mld2q_nsrcs != 0) {
1183 if (!pskb_may_pull(skb, srcs_offset +
1184 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1185 return -EINVAL;
1186
1187 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1188 mark = 1;
1189 }
1190 } else
1191 return -EINVAL;
1192
1193 read_lock_bh(&idev->lock);
1194 if (group_type == IPV6_ADDR_ANY) {
1195 for (ma = idev->mc_list; ma; ma=ma->next) {
1196 spin_lock_bh(&ma->mca_lock);
1197 igmp6_group_queried(ma, max_delay);
1198 spin_unlock_bh(&ma->mca_lock);
1199 }
1200 } else {
1201 for (ma = idev->mc_list; ma; ma=ma->next) {
1202 if (!ipv6_addr_equal(group, &ma->mca_addr))
1203 continue;
1204 spin_lock_bh(&ma->mca_lock);
1205 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1206 /* gsquery <- gsquery && mark */
1207 if (!mark)
1208 ma->mca_flags &= ~MAF_GSQUERY;
1209 } else {
1210 /* gsquery <- mark */
1211 if (mark)
1212 ma->mca_flags |= MAF_GSQUERY;
1213 else
1214 ma->mca_flags &= ~MAF_GSQUERY;
1215 }
1216 if (!(ma->mca_flags & MAF_GSQUERY) ||
1217 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1218 igmp6_group_queried(ma, max_delay);
1219 spin_unlock_bh(&ma->mca_lock);
1220 break;
1221 }
1222 }
1223 read_unlock_bh(&idev->lock);
1224
1225 return 0;
1226}
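
/* Worked example of the version handling above: an MLDv1 query (24-byte
 * ICMPv6 message) carrying a Maximum Response Delay of 10000 ms yields
 * max_delay = 10*HZ jiffies; with the default robustness of
 * MLD_QRV_DEFAULT == 2 the interface then considers an MLDv1 router
 * present until jiffies + 3 * 10*HZ, i.e. for roughly 30 seconds after
 * the last v1 query.  For MLDv2 the delay comes from the Maximum
 * Response Code via MLDV2_MRC(), clamped to at least one jiffy.
 */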
1227
1228/* called with rcu_read_lock() */
1229int igmp6_event_report(struct sk_buff *skb)
1230{
1231 struct ifmcaddr6 *ma;
1232 struct inet6_dev *idev;
1233 struct mld_msg *mld;
1234 int addr_type;
1235
1236 /* Our own report looped back. Ignore it. */
1237 if (skb->pkt_type == PACKET_LOOPBACK)
1238 return 0;
1239
1240 /* A unicast report may not have reached the MC router; do not let it suppress ours */
1241 if (skb->pkt_type != PACKET_MULTICAST &&
1242 skb->pkt_type != PACKET_BROADCAST)
1243 return 0;
1244
1245 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1246 return -EINVAL;
1247
1248 mld = (struct mld_msg *)icmp6_hdr(skb);
1249
1250 /* Drop reports whose source is neither unspecified nor link-local */
1251 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1252 if (addr_type != IPV6_ADDR_ANY &&
1253 !(addr_type&IPV6_ADDR_LINKLOCAL))
1254 return -EINVAL;
1255
1256 idev = __in6_dev_get(skb->dev);
1257 if (idev == NULL)
1258 return -ENODEV;
1259
1260 /*
1261 * Cancel the timer for this group
1262 */
1263
1264 read_lock_bh(&idev->lock);
1265 for (ma = idev->mc_list; ma; ma=ma->next) {
1266 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1267 spin_lock(&ma->mca_lock);
1268 if (del_timer(&ma->mca_timer))
1269 atomic_dec(&ma->mca_refcnt);
1270 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1271 spin_unlock(&ma->mca_lock);
1272 break;
1273 }
1274 }
1275 read_unlock_bh(&idev->lock);
1276 return 0;
1277}
1278
1279static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1280 int gdeleted, int sdeleted)
1281{
1282 switch (type) {
1283 case MLD2_MODE_IS_INCLUDE:
1284 case MLD2_MODE_IS_EXCLUDE:
1285 if (gdeleted || sdeleted)
1286 return false;
1287 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1288 if (pmc->mca_sfmode == MCAST_INCLUDE)
1289 return true;
1290 /* don't include if this source is excluded
1291 * in all filters
1292 */
1293 if (psf->sf_count[MCAST_INCLUDE])
1294 return type == MLD2_MODE_IS_INCLUDE;
1295 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1296 psf->sf_count[MCAST_EXCLUDE];
1297 }
1298 return false;
1299 case MLD2_CHANGE_TO_INCLUDE:
1300 if (gdeleted || sdeleted)
1301 return false;
1302 return psf->sf_count[MCAST_INCLUDE] != 0;
1303 case MLD2_CHANGE_TO_EXCLUDE:
1304 if (gdeleted || sdeleted)
1305 return false;
1306 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1307 psf->sf_count[MCAST_INCLUDE])
1308 return false;
1309 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1310 psf->sf_count[MCAST_EXCLUDE];
1311 case MLD2_ALLOW_NEW_SOURCES:
1312 if (gdeleted || !psf->sf_crcount)
1313 return false;
1314 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1315 case MLD2_BLOCK_OLD_SOURCES:
1316 if (pmc->mca_sfmode == MCAST_INCLUDE)
1317 return gdeleted || (psf->sf_crcount && sdeleted);
1318 return psf->sf_crcount && !gdeleted && !sdeleted;
1319 }
1320 return false;
1321}
1322
1323static int
1324mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1325{
1326 struct ip6_sf_list *psf;
1327 int scount = 0;
1328
1329 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1330 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1331 continue;
1332 scount++;
1333 }
1334 return scount;
1335}
1336
1337static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1338{
1339 struct net *net = dev_net(dev);
1340 struct sock *sk = net->ipv6.igmp_sk;
1341 struct sk_buff *skb;
1342 struct mld2_report *pmr;
1343 struct in6_addr addr_buf;
1344 const struct in6_addr *saddr;
1345 int hlen = LL_RESERVED_SPACE(dev);
1346 int tlen = dev->needed_tailroom;
1347 int err;
1348 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1349 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1350 IPV6_TLV_PADN, 0 };
1351
1352 /* we assume size > sizeof(ra) here */
1353 size += hlen + tlen;
1354 /* limit our allocations to order-0 page */
1355 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1356 skb = sock_alloc_send_skb(sk, size, 1, &err);
1357
1358 if (!skb)
1359 return NULL;
1360
1361 skb_reserve(skb, hlen);
1362
1363 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1364 /* <draft-ietf-magma-mld-source-05.txt>:
1365 * use unspecified address as the source address
1366 * when a valid link-local address is not available.
1367 */
1368 saddr = &in6addr_any;
1369 } else
1370 saddr = &addr_buf;
1371
1372 ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1373
1374 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1375
1376 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1377 skb_put(skb, sizeof(*pmr));
1378 pmr = (struct mld2_report *)skb_transport_header(skb);
1379 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1380 pmr->mld2r_resv1 = 0;
1381 pmr->mld2r_cksum = 0;
1382 pmr->mld2r_resv2 = 0;
1383 pmr->mld2r_ngrec = 0;
1384 return skb;
1385}
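
/* The packet assembled above is: an IPv6 header (built by ip6_nd_hdr()
 * with a Hop-by-Hop next header), the 8-byte Hop-by-Hop extension header
 * carrying the Router Alert option for MLD, and an empty MLDv2 report
 * header.  Group records are appended later by add_grhead()/add_grec(),
 * and the payload length and checksum are only filled in by
 * mld_sendpack().
 */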
1386
1387static void mld_sendpack(struct sk_buff *skb)
1388{
1389 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1390 struct mld2_report *pmr =
1391 (struct mld2_report *)skb_transport_header(skb);
1392 int payload_len, mldlen;
1393 struct inet6_dev *idev;
1394 struct net *net = dev_net(skb->dev);
1395 int err;
1396 struct flowi6 fl6;
1397 struct dst_entry *dst;
1398
1399 rcu_read_lock();
1400 idev = __in6_dev_get(skb->dev);
1401 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1402
1403 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
1404 mldlen = skb->tail - skb->transport_header;
1405 pip6->payload_len = htons(payload_len);
1406
1407 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1408 IPPROTO_ICMPV6,
1409 csum_partial(skb_transport_header(skb),
1410 mldlen, 0));
1411
1412 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1413 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1414 skb->dev->ifindex);
1415 dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
1416
1417 err = 0;
1418 if (IS_ERR(dst)) {
1419 err = PTR_ERR(dst);
1420 dst = NULL;
1421 }
1422 skb_dst_set(skb, dst);
1423 if (err)
1424 goto err_out;
1425
1426 payload_len = skb->len;
1427
1428 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1429 dst_output);
1430out:
1431 if (!err) {
1432 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1433 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1434 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1435 } else
1436 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1437
1438 rcu_read_unlock();
1439 return;
1440
1441err_out:
1442 kfree_skb(skb);
1443 goto out;
1444}
1445
1446static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1447{
1448 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1449}
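
/* Worked example: struct mld2_grec is 20 bytes (type, aux-data length,
 * source count and the 16-byte group address), and each source address
 * adds 16 bytes, so a record that will carry three reportable sources
 * needs 20 + 3 * 16 = 68 bytes of room in the pending skb.
 */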
1450
1451static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1452 int type, struct mld2_grec **ppgr)
1453{
1454 struct net_device *dev = pmc->idev->dev;
1455 struct mld2_report *pmr;
1456 struct mld2_grec *pgr;
1457
1458 if (!skb)
1459 skb = mld_newpack(dev, dev->mtu);
1460 if (!skb)
1461 return NULL;
1462 pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
1463 pgr->grec_type = type;
1464 pgr->grec_auxwords = 0;
1465 pgr->grec_nsrcs = 0;
1466 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1467 pmr = (struct mld2_report *)skb_transport_header(skb);
1468 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1469 *ppgr = pgr;
1470 return skb;
1471}
1472
1473#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
1474 skb_tailroom(skb)) : 0)
1475
1476static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1477 int type, int gdeleted, int sdeleted)
1478{
1479 struct net_device *dev = pmc->idev->dev;
1480 struct mld2_report *pmr;
1481 struct mld2_grec *pgr = NULL;
1482 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1483 int scount, stotal, first, isquery, truncate;
1484
1485 if (pmc->mca_flags & MAF_NOREPORT)
1486 return skb;
1487
1488 isquery = type == MLD2_MODE_IS_INCLUDE ||
1489 type == MLD2_MODE_IS_EXCLUDE;
1490 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1491 type == MLD2_CHANGE_TO_EXCLUDE;
1492
1493 stotal = scount = 0;
1494
1495 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1496
1497 if (!*psf_list)
1498 goto empty_source;
1499
1500 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1501
1502 /* EX and TO_EX get a fresh packet, if needed */
1503 if (truncate) {
1504 if (pmr && pmr->mld2r_ngrec &&
1505 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1506 if (skb)
1507 mld_sendpack(skb);
1508 skb = mld_newpack(dev, dev->mtu);
1509 }
1510 }
1511 first = 1;
1512 psf_prev = NULL;
1513 for (psf=*psf_list; psf; psf=psf_next) {
1514 struct in6_addr *psrc;
1515
1516 psf_next = psf->sf_next;
1517
1518 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
1519 psf_prev = psf;
1520 continue;
1521 }
1522
1523 /* clear marks on query responses */
1524 if (isquery)
1525 psf->sf_gsresp = 0;
1526
1527 if (AVAILABLE(skb) < sizeof(*psrc) +
1528 first*sizeof(struct mld2_grec)) {
1529 if (truncate && !first)
1530 break; /* truncate these */
1531 if (pgr)
1532 pgr->grec_nsrcs = htons(scount);
1533 if (skb)
1534 mld_sendpack(skb);
1535 skb = mld_newpack(dev, dev->mtu);
1536 first = 1;
1537 scount = 0;
1538 }
1539 if (first) {
1540 skb = add_grhead(skb, pmc, type, &pgr);
1541 first = 0;
1542 }
1543 if (!skb)
1544 return NULL;
1545 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1546 *psrc = psf->sf_addr;
1547 scount++; stotal++;
1548 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1549 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1550 psf->sf_crcount--;
1551 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1552 if (psf_prev)
1553 psf_prev->sf_next = psf->sf_next;
1554 else
1555 *psf_list = psf->sf_next;
1556 kfree(psf);
1557 continue;
1558 }
1559 }
1560 psf_prev = psf;
1561 }
1562
1563empty_source:
1564 if (!stotal) {
1565 if (type == MLD2_ALLOW_NEW_SOURCES ||
1566 type == MLD2_BLOCK_OLD_SOURCES)
1567 return skb;
1568 if (pmc->mca_crcount || isquery) {
1569 /* make sure we have room for group header */
1570 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1571 mld_sendpack(skb);
1572 skb = NULL; /* add_grhead will get a new one */
1573 }
1574 skb = add_grhead(skb, pmc, type, &pgr);
1575 }
1576 }
1577 if (pgr)
1578 pgr->grec_nsrcs = htons(scount);
1579
1580 if (isquery)
1581 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1582 return skb;
1583}
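
/* Note on add_grec(): the skb it returns is not necessarily the one it
 * was given.  When the next source (or, for IS_EX/TO_EX types, the whole
 * group record) does not fit, the current skb is handed to mld_sendpack()
 * and a fresh one is started with mld_newpack(); the caller must
 * eventually mld_sendpack() whatever skb it ends up holding.
 */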
1584
1585static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1586{
1587 struct sk_buff *skb = NULL;
1588 int type;
1589
1590 if (!pmc) {
1591 read_lock_bh(&idev->lock);
1592 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1593 if (pmc->mca_flags & MAF_NOREPORT)
1594 continue;
1595 spin_lock_bh(&pmc->mca_lock);
1596 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1597 type = MLD2_MODE_IS_EXCLUDE;
1598 else
1599 type = MLD2_MODE_IS_INCLUDE;
1600 skb = add_grec(skb, pmc, type, 0, 0);
1601 spin_unlock_bh(&pmc->mca_lock);
1602 }
1603 read_unlock_bh(&idev->lock);
1604 } else {
1605 spin_lock_bh(&pmc->mca_lock);
1606 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1607 type = MLD2_MODE_IS_EXCLUDE;
1608 else
1609 type = MLD2_MODE_IS_INCLUDE;
1610 skb = add_grec(skb, pmc, type, 0, 0);
1611 spin_unlock_bh(&pmc->mca_lock);
1612 }
1613 if (skb)
1614 mld_sendpack(skb);
1615}
1616
1617/*
1618 * remove zero-count source records from a source filter list
1619 */
1620static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1621{
1622 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1623
1624 psf_prev = NULL;
1625 for (psf=*ppsf; psf; psf = psf_next) {
1626 psf_next = psf->sf_next;
1627 if (psf->sf_crcount == 0) {
1628 if (psf_prev)
1629 psf_prev->sf_next = psf->sf_next;
1630 else
1631 *ppsf = psf->sf_next;
1632 kfree(psf);
1633 } else
1634 psf_prev = psf;
1635 }
1636}
1637
1638static void mld_send_cr(struct inet6_dev *idev)
1639{
1640 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1641 struct sk_buff *skb = NULL;
1642 int type, dtype;
1643
1644 read_lock_bh(&idev->lock);
1645 spin_lock(&idev->mc_lock);
1646
1647 /* deleted MCAs: change records for groups on the tomb list */
1648 pmc_prev = NULL;
1649 for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
1650 pmc_next = pmc->next;
1651 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1652 type = MLD2_BLOCK_OLD_SOURCES;
1653 dtype = MLD2_BLOCK_OLD_SOURCES;
1654 skb = add_grec(skb, pmc, type, 1, 0);
1655 skb = add_grec(skb, pmc, dtype, 1, 1);
1656 }
1657 if (pmc->mca_crcount) {
1658 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1659 type = MLD2_CHANGE_TO_INCLUDE;
1660 skb = add_grec(skb, pmc, type, 1, 0);
1661 }
1662 pmc->mca_crcount--;
1663 if (pmc->mca_crcount == 0) {
1664 mld_clear_zeros(&pmc->mca_tomb);
1665 mld_clear_zeros(&pmc->mca_sources);
1666 }
1667 }
1668 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1669 !pmc->mca_sources) {
1670 if (pmc_prev)
1671 pmc_prev->next = pmc_next;
1672 else
1673 idev->mc_tomb = pmc_next;
1674 in6_dev_put(pmc->idev);
1675 kfree(pmc);
1676 } else
1677 pmc_prev = pmc;
1678 }
1679 spin_unlock(&idev->mc_lock);
1680
1681 /* change records for groups still active on the interface */
1682 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1683 spin_lock_bh(&pmc->mca_lock);
1684 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1685 type = MLD2_BLOCK_OLD_SOURCES;
1686 dtype = MLD2_ALLOW_NEW_SOURCES;
1687 } else {
1688 type = MLD2_ALLOW_NEW_SOURCES;
1689 dtype = MLD2_BLOCK_OLD_SOURCES;
1690 }
1691 skb = add_grec(skb, pmc, type, 0, 0);
1692 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
1693
1694 /* filter mode changes */
1695 if (pmc->mca_crcount) {
1696 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1697 type = MLD2_CHANGE_TO_EXCLUDE;
1698 else
1699 type = MLD2_CHANGE_TO_INCLUDE;
1700 skb = add_grec(skb, pmc, type, 0, 0);
1701 pmc->mca_crcount--;
1702 }
1703 spin_unlock_bh(&pmc->mca_lock);
1704 }
1705 read_unlock_bh(&idev->lock);
1706 if (!skb)
1707 return;
1708 mld_sendpack(skb);
1709}
1710
1711static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1712{
1713 struct net *net = dev_net(dev);
1714 struct sock *sk = net->ipv6.igmp_sk;
1715 struct inet6_dev *idev;
1716 struct sk_buff *skb;
1717 struct mld_msg *hdr;
1718 const struct in6_addr *snd_addr, *saddr;
1719 struct in6_addr addr_buf;
1720 int hlen = LL_RESERVED_SPACE(dev);
1721 int tlen = dev->needed_tailroom;
1722 int err, len, payload_len, full_len;
1723 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1724 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1725 IPV6_TLV_PADN, 0 };
1726 struct flowi6 fl6;
1727 struct dst_entry *dst;
1728
1729 if (type == ICMPV6_MGM_REDUCTION)
1730 snd_addr = &in6addr_linklocal_allrouters;
1731 else
1732 snd_addr = addr;
1733
1734 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1735 payload_len = len + sizeof(ra);
1736 full_len = sizeof(struct ipv6hdr) + payload_len;
1737
1738 rcu_read_lock();
1739 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1740 IPSTATS_MIB_OUT, full_len);
1741 rcu_read_unlock();
1742
1743 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
1744
1745 if (skb == NULL) {
1746 rcu_read_lock();
1747 IP6_INC_STATS(net, __in6_dev_get(dev),
1748 IPSTATS_MIB_OUTDISCARDS);
1749 rcu_read_unlock();
1750 return;
1751 }
1752
1753 skb_reserve(skb, hlen);
1754
1755 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1756 /* <draft-ietf-magma-mld-source-05.txt>:
1757 * use unspecified address as the source address
1758 * when a valid link-local address is not available.
1759 */
1760 saddr = &in6addr_any;
1761 } else
1762 saddr = &addr_buf;
1763
1764 ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1765
1766 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1767
1768 hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1769 memset(hdr, 0, sizeof(struct mld_msg));
1770 hdr->mld_type = type;
1771 hdr->mld_mca = *addr;
1772
1773 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1774 IPPROTO_ICMPV6,
1775 csum_partial(hdr, len, 0));
1776
1777 rcu_read_lock();
1778 idev = __in6_dev_get(skb->dev);
1779
1780 icmpv6_flow_init(sk, &fl6, type,
1781 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1782 skb->dev->ifindex);
1783 dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
1784 if (IS_ERR(dst)) {
1785 err = PTR_ERR(dst);
1786 goto err_out;
1787 }
1788
1789 skb_dst_set(skb, dst);
1790 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1791 dst_output);
1792out:
1793 if (!err) {
1794 ICMP6MSGOUT_INC_STATS(net, idev, type);
1795 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1796 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1797 } else
1798 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1799
1800 rcu_read_unlock();
1801 return;
1802
1803err_out:
1804 kfree_skb(skb);
1805 goto out;
1806}
1807
1808static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1809 const struct in6_addr *psfsrc)
1810{
1811 struct ip6_sf_list *psf, *psf_prev;
1812 int rv = 0;
1813
1814 psf_prev = NULL;
1815 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1816 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
1817 break;
1818 psf_prev = psf;
1819 }
1820 if (!psf || psf->sf_count[sfmode] == 0) {
1821 /* source filter not found, or count wrong => bug */
1822 return -ESRCH;
1823 }
1824 psf->sf_count[sfmode]--;
1825 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
1826 struct inet6_dev *idev = pmc->idev;
1827
1828 /* no more filters for this source */
1829 if (psf_prev)
1830 psf_prev->sf_next = psf->sf_next;
1831 else
1832 pmc->mca_sources = psf->sf_next;
1833 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
1834 !MLD_V1_SEEN(idev)) {
1835 psf->sf_crcount = idev->mc_qrv;
1836 psf->sf_next = pmc->mca_tomb;
1837 pmc->mca_tomb = psf;
1838 rv = 1;
1839 } else
1840 kfree(psf);
1841 }
1842 return rv;
1843}
1844
1845static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
1846 int sfmode, int sfcount, const struct in6_addr *psfsrc,
1847 int delta)
1848{
1849 struct ifmcaddr6 *pmc;
1850 int changerec = 0;
1851 int i, err;
1852
1853 if (!idev)
1854 return -ENODEV;
1855 read_lock_bh(&idev->lock);
1856 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1857 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
1858 break;
1859 }
1860 if (!pmc) {
1861 /* MCA not found?? bug */
1862 read_unlock_bh(&idev->lock);
1863 return -ESRCH;
1864 }
1865 spin_lock_bh(&pmc->mca_lock);
1866 sf_markstate(pmc);
1867 if (!delta) {
1868 if (!pmc->mca_sfcount[sfmode]) {
1869 spin_unlock_bh(&pmc->mca_lock);
1870 read_unlock_bh(&idev->lock);
1871 return -EINVAL;
1872 }
1873 pmc->mca_sfcount[sfmode]--;
1874 }
1875 err = 0;
1876 for (i=0; i<sfcount; i++) {
1877 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
1878
1879 changerec |= rv > 0;
1880 if (!err && rv < 0)
1881 err = rv;
1882 }
1883 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
1884 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
1885 pmc->mca_sfcount[MCAST_INCLUDE]) {
1886 struct ip6_sf_list *psf;
1887
1888 /* filter mode change */
1889 pmc->mca_sfmode = MCAST_INCLUDE;
1890 pmc->mca_crcount = idev->mc_qrv;
1891 idev->mc_ifc_count = pmc->mca_crcount;
1892 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
1893 psf->sf_crcount = 0;
1894 mld_ifc_event(pmc->idev);
1895 } else if (sf_setstate(pmc) || changerec)
1896 mld_ifc_event(pmc->idev);
1897 spin_unlock_bh(&pmc->mca_lock);
1898 read_unlock_bh(&idev->lock);
1899 return err;
1900}
1901
1902/*
1903 * Add multicast single-source filter to the interface list
1904 */
1905static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
1906 const struct in6_addr *psfsrc)
1907{
1908 struct ip6_sf_list *psf, *psf_prev;
1909
1910 psf_prev = NULL;
1911 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1912 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
1913 break;
1914 psf_prev = psf;
1915 }
1916 if (!psf) {
1917 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1918 if (!psf)
1919 return -ENOBUFS;
1920
1921 psf->sf_addr = *psfsrc;
1922 if (psf_prev) {
1923 psf_prev->sf_next = psf;
1924 } else
1925 pmc->mca_sources = psf;
1926 }
1927 psf->sf_count[sfmode]++;
1928 return 0;
1929}
1930
1931static void sf_markstate(struct ifmcaddr6 *pmc)
1932{
1933 struct ip6_sf_list *psf;
1934 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
1935
1936 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
1937 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1938 psf->sf_oldin = mca_xcount ==
1939 psf->sf_count[MCAST_EXCLUDE] &&
1940 !psf->sf_count[MCAST_INCLUDE];
1941 } else
1942 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
1943}
1944
1945static int sf_setstate(struct ifmcaddr6 *pmc)
1946{
1947 struct ip6_sf_list *psf, *dpsf;
1948 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
1949 int qrv = pmc->idev->mc_qrv;
1950 int new_in, rv;
1951
1952 rv = 0;
1953 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1954 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1955 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
1956 !psf->sf_count[MCAST_INCLUDE];
1957 } else
1958 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
1959 if (new_in) {
1960 if (!psf->sf_oldin) {
1961 struct ip6_sf_list *prev = NULL;
1962
1963 for (dpsf=pmc->mca_tomb; dpsf;
1964 dpsf=dpsf->sf_next) {
1965 if (ipv6_addr_equal(&dpsf->sf_addr,
1966 &psf->sf_addr))
1967 break;
1968 prev = dpsf;
1969 }
1970 if (dpsf) {
1971 if (prev)
1972 prev->sf_next = dpsf->sf_next;
1973 else
1974 pmc->mca_tomb = dpsf->sf_next;
1975 kfree(dpsf);
1976 }
1977 psf->sf_crcount = qrv;
1978 rv++;
1979 }
1980 } else if (psf->sf_oldin) {
1981 psf->sf_crcount = 0;
1982 /*
1983 * add or update "delete" records if an active filter
1984 * is now inactive
1985 */
1986 for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
1987 if (ipv6_addr_equal(&dpsf->sf_addr,
1988 &psf->sf_addr))
1989 break;
1990 if (!dpsf) {
1991 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
1992 if (!dpsf)
1993 continue;
1994 *dpsf = *psf;
1995 /* pmc->mca_lock held by callers */
1996 dpsf->sf_next = pmc->mca_tomb;
1997 pmc->mca_tomb = dpsf;
1998 }
1999 dpsf->sf_crcount = qrv;
2000 rv++;
2001 }
2002 }
2003 return rv;
2004}
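
/* Worked example for the two helpers above: sf_markstate() snapshots in
 * sf_oldin whether each source was part of the reported source list
 * before a change, and sf_setstate() compares the new state with that
 * snapshot.  A source whose state changed gets a retransmit count of qrv
 * (either on the source itself or on a "delete" record moved to
 * mca_tomb), so the corresponding ALLOW_NEW_SOURCES / BLOCK_OLD_SOURCES
 * change record is sent [Robustness Variable] times by mld_send_cr().
 */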
2005
2006/*
2007 * Add multicast source filter list to the interface list
2008 */
2009static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2010 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2011 int delta)
2012{
2013 struct ifmcaddr6 *pmc;
2014 int isexclude;
2015 int i, err;
2016
2017 if (!idev)
2018 return -ENODEV;
2019 read_lock_bh(&idev->lock);
2020 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2021 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2022 break;
2023 }
2024 if (!pmc) {
2025 /* MCA not found?? bug */
2026 read_unlock_bh(&idev->lock);
2027 return -ESRCH;
2028 }
2029 spin_lock_bh(&pmc->mca_lock);
2030
2031 sf_markstate(pmc);
2032 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2033 if (!delta)
2034 pmc->mca_sfcount[sfmode]++;
2035 err = 0;
2036 for (i=0; i<sfcount; i++) {
2037 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2038 if (err)
2039 break;
2040 }
2041 if (err) {
2042 int j;
2043
2044 if (!delta)
2045 pmc->mca_sfcount[sfmode]--;
2046 for (j=0; j<i; j++)
2047 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2048 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2049 struct ip6_sf_list *psf;
2050
2051 /* filter mode change */
2052 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2053 pmc->mca_sfmode = MCAST_EXCLUDE;
2054 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2055 pmc->mca_sfmode = MCAST_INCLUDE;
2056 /* else no filters; keep old mode for reports */
2057
2058 pmc->mca_crcount = idev->mc_qrv;
2059 idev->mc_ifc_count = pmc->mca_crcount;
2060 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2061 psf->sf_crcount = 0;
2062 mld_ifc_event(idev);
2063 } else if (sf_setstate(pmc))
2064 mld_ifc_event(idev);
2065 spin_unlock_bh(&pmc->mca_lock);
2066 read_unlock_bh(&idev->lock);
2067 return err;
2068}
2069
2070static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2071{
2072 struct ip6_sf_list *psf, *nextpsf;
2073
2074 for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
2075 nextpsf = psf->sf_next;
2076 kfree(psf);
2077 }
2078 pmc->mca_tomb = NULL;
2079 for (psf=pmc->mca_sources; psf; psf=nextpsf) {
2080 nextpsf = psf->sf_next;
2081 kfree(psf);
2082 }
2083 pmc->mca_sources = NULL;
2084 pmc->mca_sfmode = MCAST_EXCLUDE;
2085 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2086 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2087}
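
/* After ip6_mc_clear_src() the group is back in the any-source state a
 * plain join uses: filter mode EXCLUDE with an empty source list and a
 * single EXCLUDE reference, so it keeps being reported as "exclude
 * nothing" until the last user drops it.
 */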
2088
2089
2090static void igmp6_join_group(struct ifmcaddr6 *ma)
2091{
2092 unsigned long delay;
2093
2094 if (ma->mca_flags & MAF_NOREPORT)
2095 return;
2096
2097 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2098
2099 delay = net_random() % IGMP6_UNSOLICITED_IVAL;
2100
2101 spin_lock_bh(&ma->mca_lock);
2102 if (del_timer(&ma->mca_timer)) {
2103 atomic_dec(&ma->mca_refcnt);
2104 delay = ma->mca_timer.expires - jiffies;
2105 }
2106
2107 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2108 atomic_inc(&ma->mca_refcnt);
2109 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2110 spin_unlock_bh(&ma->mca_lock);
2111}
2112
2113static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2114 struct inet6_dev *idev)
2115{
2116 int err;
2117
2118 /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
2119 * so no other readers or writers of iml or its sflist
2120 */
2121 if (!iml->sflist) {
2122 /* any-source empty exclude case */
2123 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2124 }
2125 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2126 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2127 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2128 iml->sflist = NULL;
2129 return err;
2130}
2131
2132static void igmp6_leave_group(struct ifmcaddr6 *ma)
2133{
2134 if (MLD_V1_SEEN(ma->idev)) {
2135 if (ma->mca_flags & MAF_LAST_REPORTER)
2136 igmp6_send(&ma->mca_addr, ma->idev->dev,
2137 ICMPV6_MGM_REDUCTION);
2138 } else {
2139 mld_add_delrec(ma->idev, ma);
2140 mld_ifc_event(ma->idev);
2141 }
2142}
2143
2144static void mld_gq_timer_expire(unsigned long data)
2145{
2146 struct inet6_dev *idev = (struct inet6_dev *)data;
2147
2148 idev->mc_gq_running = 0;
2149 mld_send_report(idev, NULL);
2150 __in6_dev_put(idev);
2151}
2152
2153static void mld_ifc_timer_expire(unsigned long data)
2154{
2155 struct inet6_dev *idev = (struct inet6_dev *)data;
2156
2157 mld_send_cr(idev);
2158 if (idev->mc_ifc_count) {
2159 idev->mc_ifc_count--;
2160 if (idev->mc_ifc_count)
2161 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2162 }
2163 __in6_dev_put(idev);
2164}
2165
2166static void mld_ifc_event(struct inet6_dev *idev)
2167{
2168 if (MLD_V1_SEEN(idev))
2169 return;
2170 idev->mc_ifc_count = idev->mc_qrv;
2171 mld_ifc_start_timer(idev, 1);
2172}
2173
2174
2175static void igmp6_timer_handler(unsigned long data)
2176{
2177 struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
2178
2179 if (MLD_V1_SEEN(ma->idev))
2180 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2181 else
2182 mld_send_report(ma->idev, ma);
2183
2184 spin_lock(&ma->mca_lock);
2185 ma->mca_flags |= MAF_LAST_REPORTER;
2186 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2187 spin_unlock(&ma->mca_lock);
2188 ma_put(ma);
2189}
2190
2191/* Device changing type */
2192
2193void ipv6_mc_unmap(struct inet6_dev *idev)
2194{
2195 struct ifmcaddr6 *i;
2196
2197 /* Drop the installed multicast list while the device changes type */
2198
2199 read_lock_bh(&idev->lock);
2200 for (i = idev->mc_list; i; i = i->next)
2201 igmp6_group_dropped(i);
2202 read_unlock_bh(&idev->lock);
2203}
2204
2205void ipv6_mc_remap(struct inet6_dev *idev)
2206{
2207 ipv6_mc_up(idev);
2208}
2209
2210/* Device going down */
2211
2212void ipv6_mc_down(struct inet6_dev *idev)
2213{
2214 struct ifmcaddr6 *i;
2215
2216 /* Withdraw multicast list */
2217
2218 read_lock_bh(&idev->lock);
2219 idev->mc_ifc_count = 0;
2220 if (del_timer(&idev->mc_ifc_timer))
2221 __in6_dev_put(idev);
2222 idev->mc_gq_running = 0;
2223 if (del_timer(&idev->mc_gq_timer))
2224 __in6_dev_put(idev);
2225
2226 for (i = idev->mc_list; i; i=i->next)
2227 igmp6_group_dropped(i);
2228 read_unlock_bh(&idev->lock);
2229
2230 mld_clear_delrec(idev);
2231}
2232
2233
2234/* Device going up */
2235
2236void ipv6_mc_up(struct inet6_dev *idev)
2237{
2238 struct ifmcaddr6 *i;
2239
2240 /* Install multicast list, except for all-nodes (already installed) */
2241
2242 read_lock_bh(&idev->lock);
2243 for (i = idev->mc_list; i; i=i->next)
2244 igmp6_group_added(i);
2245 read_unlock_bh(&idev->lock);
2246}
2247
2248/* IPv6 device initialization. */
2249
2250void ipv6_mc_init_dev(struct inet6_dev *idev)
2251{
2252 write_lock_bh(&idev->lock);
2253 spin_lock_init(&idev->mc_lock);
2254 idev->mc_gq_running = 0;
2255 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2256 (unsigned long)idev);
2257 idev->mc_tomb = NULL;
2258 idev->mc_ifc_count = 0;
2259 setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
2260 (unsigned long)idev);
2261 idev->mc_qrv = MLD_QRV_DEFAULT;
2262 idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
2263 idev->mc_v1_seen = 0;
2264 write_unlock_bh(&idev->lock);
2265}
2266
2267/*
2268 * Device is about to be destroyed: clean up.
2269 */
2270
2271void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2272{
2273 struct ifmcaddr6 *i;
2274
2275 /* Deactivate timers */
2276 ipv6_mc_down(idev);
2277
2278 /* Delete all-nodes address. */
2279 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2280 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2281 * fail.
2282 */
2283 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2284
2285 if (idev->cnf.forwarding)
2286 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2287
2288 write_lock_bh(&idev->lock);
2289 while ((i = idev->mc_list) != NULL) {
2290 idev->mc_list = i->next;
2291 write_unlock_bh(&idev->lock);
2292
2293 igmp6_group_dropped(i);
2294 ma_put(i);
2295
2296 write_lock_bh(&idev->lock);
2297 }
2298 write_unlock_bh(&idev->lock);
2299}
2300
2301#ifdef CONFIG_PROC_FS
2302struct igmp6_mc_iter_state {
2303 struct seq_net_private p;
2304 struct net_device *dev;
2305 struct inet6_dev *idev;
2306};
2307
2308#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2309
2310static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2311{
2312 struct ifmcaddr6 *im = NULL;
2313 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2314 struct net *net = seq_file_net(seq);
2315
2316 state->idev = NULL;
2317 for_each_netdev_rcu(net, state->dev) {
2318 struct inet6_dev *idev;
2319 idev = __in6_dev_get(state->dev);
2320 if (!idev)
2321 continue;
2322 read_lock_bh(&idev->lock);
2323 im = idev->mc_list;
2324 if (im) {
2325 state->idev = idev;
2326 break;
2327 }
2328 read_unlock_bh(&idev->lock);
2329 }
2330 return im;
2331}
2332
2333static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2334{
2335 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2336
2337 im = im->next;
2338 while (!im) {
2339 if (likely(state->idev != NULL))
2340 read_unlock_bh(&state->idev->lock);
2341
2342 state->dev = next_net_device_rcu(state->dev);
2343 if (!state->dev) {
2344 state->idev = NULL;
2345 break;
2346 }
2347 state->idev = __in6_dev_get(state->dev);
2348 if (!state->idev)
2349 continue;
2350 read_lock_bh(&state->idev->lock);
2351 im = state->idev->mc_list;
2352 }
2353 return im;
2354}
2355
2356static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2357{
2358 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2359 if (im)
2360 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2361 --pos;
2362 return pos ? NULL : im;
2363}
2364
2365static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2366 __acquires(RCU)
2367{
2368 rcu_read_lock();
2369 return igmp6_mc_get_idx(seq, *pos);
2370}
2371
2372static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2373{
2374 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2375
2376 ++*pos;
2377 return im;
2378}
2379
2380static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2381 __releases(RCU)
2382{
2383 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2384
2385 if (likely(state->idev != NULL)) {
2386 read_unlock_bh(&state->idev->lock);
2387 state->idev = NULL;
2388 }
2389 state->dev = NULL;
2390 rcu_read_unlock();
2391}
2392
2393static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2394{
2395 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2396 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2397
2398 seq_printf(seq,
2399 "%-4d %-15s %pi6 %5d %08X %ld\n",
2400 state->dev->ifindex, state->dev->name,
2401 &im->mca_addr,
2402 im->mca_users, im->mca_flags,
2403 (im->mca_flags&MAF_TIMER_RUNNING) ?
2404 jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2405 return 0;
2406}
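
/* Illustrative /proc/net/igmp6 output (values are made up): one line per
 * (device, group), printing ifindex, name, the group address as raw hex,
 * the user count, the flag word and the remaining timer in clock ticks:
 *
 *	2    eth0            ff020000000000000000000000000001     1 00000004 0
 */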
2407
2408static const struct seq_operations igmp6_mc_seq_ops = {
2409 .start = igmp6_mc_seq_start,
2410 .next = igmp6_mc_seq_next,
2411 .stop = igmp6_mc_seq_stop,
2412 .show = igmp6_mc_seq_show,
2413};
2414
2415static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2416{
2417 return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2418 sizeof(struct igmp6_mc_iter_state));
2419}
2420
2421static const struct file_operations igmp6_mc_seq_fops = {
2422 .owner = THIS_MODULE,
2423 .open = igmp6_mc_seq_open,
2424 .read = seq_read,
2425 .llseek = seq_lseek,
2426 .release = seq_release_net,
2427};
2428
2429struct igmp6_mcf_iter_state {
2430 struct seq_net_private p;
2431 struct net_device *dev;
2432 struct inet6_dev *idev;
2433 struct ifmcaddr6 *im;
2434};
2435
2436#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2437
2438static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2439{
2440 struct ip6_sf_list *psf = NULL;
2441 struct ifmcaddr6 *im = NULL;
2442 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2443 struct net *net = seq_file_net(seq);
2444
2445 state->idev = NULL;
2446 state->im = NULL;
2447 for_each_netdev_rcu(net, state->dev) {
2448 struct inet6_dev *idev;
2449 idev = __in6_dev_get(state->dev);
2450 if (unlikely(idev == NULL))
2451 continue;
2452 read_lock_bh(&idev->lock);
2453 im = idev->mc_list;
2454 if (likely(im != NULL)) {
2455 spin_lock_bh(&im->mca_lock);
2456 psf = im->mca_sources;
2457 if (likely(psf != NULL)) {
2458 state->im = im;
2459 state->idev = idev;
2460 break;
2461 }
2462 spin_unlock_bh(&im->mca_lock);
2463 }
2464 read_unlock_bh(&idev->lock);
2465 }
2466 return psf;
2467}
2468
2469static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2470{
2471 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2472
2473 psf = psf->sf_next;
2474 while (!psf) {
2475 spin_unlock_bh(&state->im->mca_lock);
2476 state->im = state->im->next;
2477 while (!state->im) {
2478 if (likely(state->idev != NULL))
2479 read_unlock_bh(&state->idev->lock);
2480
2481 state->dev = next_net_device_rcu(state->dev);
2482 if (!state->dev) {
2483 state->idev = NULL;
2484 goto out;
2485 }
2486 state->idev = __in6_dev_get(state->dev);
2487 if (!state->idev)
2488 continue;
2489 read_lock_bh(&state->idev->lock);
2490 state->im = state->idev->mc_list;
2491 }
2492 if (!state->im)
2493 break;
2494 spin_lock_bh(&state->im->mca_lock);
2495 psf = state->im->mca_sources;
2496 }
2497out:
2498 return psf;
2499}
2500
2501static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2502{
2503 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2504 if (psf)
2505 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2506 --pos;
2507 return pos ? NULL : psf;
2508}
2509
2510static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2511 __acquires(RCU)
2512{
2513 rcu_read_lock();
2514 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2515}
2516
2517static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2518{
2519 struct ip6_sf_list *psf;
2520 if (v == SEQ_START_TOKEN)
2521 psf = igmp6_mcf_get_first(seq);
2522 else
2523 psf = igmp6_mcf_get_next(seq, v);
2524 ++*pos;
2525 return psf;
2526}
2527
2528static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2529 __releases(RCU)
2530{
2531 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2532 if (likely(state->im != NULL)) {
2533 spin_unlock_bh(&state->im->mca_lock);
2534 state->im = NULL;
2535 }
2536 if (likely(state->idev != NULL)) {
2537 read_unlock_bh(&state->idev->lock);
2538 state->idev = NULL;
2539 }
2540 state->dev = NULL;
2541 rcu_read_unlock();
2542}
2543
2544static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2545{
2546 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2547 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2548
2549 if (v == SEQ_START_TOKEN) {
2550 seq_printf(seq,
2551 "%3s %6s "
2552 "%32s %32s %6s %6s\n", "Idx",
2553 "Device", "Multicast Address",
2554 "Source Address", "INC", "EXC");
2555 } else {
2556 seq_printf(seq,
2557 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2558 state->dev->ifindex, state->dev->name,
2559 &state->im->mca_addr,
2560 &psf->sf_addr,
2561 psf->sf_count[MCAST_INCLUDE],
2562 psf->sf_count[MCAST_EXCLUDE]);
2563 }
2564 return 0;
2565}
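
/* Illustrative /proc/net/mcfilter6 output (values are made up): after the
 * header line, one line per (device, group, source) with the INCLUDE and
 * EXCLUDE reference counts:
 *
 *	  2   eth0 ff150000000000000000000000000101 20010db8000000000000000000000001      1      0
 */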
2566
2567static const struct seq_operations igmp6_mcf_seq_ops = {
2568 .start = igmp6_mcf_seq_start,
2569 .next = igmp6_mcf_seq_next,
2570 .stop = igmp6_mcf_seq_stop,
2571 .show = igmp6_mcf_seq_show,
2572};
2573
2574static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2575{
2576 return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2577 sizeof(struct igmp6_mcf_iter_state));
2578}
2579
2580static const struct file_operations igmp6_mcf_seq_fops = {
2581 .owner = THIS_MODULE,
2582 .open = igmp6_mcf_seq_open,
2583 .read = seq_read,
2584 .llseek = seq_lseek,
2585 .release = seq_release_net,
2586};
2587
2588static int __net_init igmp6_proc_init(struct net *net)
2589{
2590 int err;
2591
2592 err = -ENOMEM;
2593 if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops))
2594 goto out;
2595 if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO,
2596 &igmp6_mcf_seq_fops))
2597 goto out_proc_net_igmp6;
2598
2599 err = 0;
2600out:
2601 return err;
2602
2603out_proc_net_igmp6:
2604 proc_net_remove(net, "igmp6");
2605 goto out;
2606}
2607
2608static void __net_exit igmp6_proc_exit(struct net *net)
2609{
2610 proc_net_remove(net, "mcfilter6");
2611 proc_net_remove(net, "igmp6");
2612}
2613#else
2614static inline int igmp6_proc_init(struct net *net)
2615{
2616 return 0;
2617}
2618static inline void igmp6_proc_exit(struct net *net)
2619{
2620}
2621#endif
2622
2623static int __net_init igmp6_net_init(struct net *net)
2624{
2625 int err;
2626
2627 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2628 SOCK_RAW, IPPROTO_ICMPV6, net);
2629 if (err < 0) {
2630 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2631 err);
2632 goto out;
2633 }
2634
2635 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2636
2637 err = igmp6_proc_init(net);
2638 if (err)
2639 goto out_sock_create;
2640out:
2641 return err;
2642
2643out_sock_create:
2644 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2645 goto out;
2646}
2647
2648static void __net_exit igmp6_net_exit(struct net *net)
2649{
2650 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2651 igmp6_proc_exit(net);
2652}
2653
2654static struct pernet_operations igmp6_net_ops = {
2655 .init = igmp6_net_init,
2656 .exit = igmp6_net_exit,
2657};
2658
2659int __init igmp6_init(void)
2660{
2661 return register_pernet_subsys(&igmp6_net_ops);
2662}
2663
2664void igmp6_cleanup(void)
2665{
2666 unregister_pernet_subsys(&igmp6_net_ops);
2667}