// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));

	if (upper)
		dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && in6_dev->cnf.mc_forwarding)
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
		flags |= BATADV_MCAST_WANT_NO_RTR4;
	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
		flags |= BATADV_MCAST_WANT_NO_RTR6;

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

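	/* each helper below only reports the WANT_NO_RTR flags for routers it
	 * did not find - AND-ing keeps a "no router" flag only if neither the
	 * host itself nor the bridge segment has such a router
	 */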
	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);

	if (!bridge)
		return mla_flags;

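	/* the bridge reference was only needed to detect its presence, the
	 * querier checks below operate on the bridge port (dev) directly
	 */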
	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	if (bridge)
		dev_put(bridge);

	return ret4 + ret6;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . ]";

	sprintf(str_old_flags, "[%c%c%c%s%s]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned nodes refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
 * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
			       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_rtr4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_rtr6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;
	int is_routable = 0;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   &is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	switch (total_count) {
	case 1:
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
		else if (rtr_count)
			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
							       ethhdr);

		if (*orig)
			return BATADV_FORW_SINGLE;

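		/* the single node counted above could not be acquired anymore
		 * (e.g. it is about to be purged) - fall through and drop the
		 * packet instead
		 */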
		fallthrough;
	case 0:
		return BATADV_FORW_NONE;
	default:
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

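		/* nodes which need unsnoopable traffic are not covered by
		 * batadv_mcast_forw_send(), so only use BATADV_FORW_SOME if
		 * no such node was counted
		 */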
1399 if (!unsnoop_count && total_count <= mcast_fanout)
1400 return BATADV_FORW_SOME;
1401 }
1402
1403 return BATADV_FORW_ALL;
1404}
1405
1406/**
1407 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
1408 * @bat_priv: the bat priv with all the soft interface information
1409 * @skb: the multicast packet to send
1410 * @vid: the vlan identifier
1411 * @orig_node: the originator to send the packet to
1412 *
1413 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
1414 */
1415int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
1416 struct sk_buff *skb,
1417 unsigned short vid,
1418 struct batadv_orig_node *orig_node)
1419{
1420 /* Avoid sending multicast-in-unicast packets to other BLA
1421 * gateways - they already got the frame from the LAN side
1422 * we share with them.
1423 * TODO: Refactor to take BLA into account earlier, to avoid
1424 * reducing the mcast_fanout count.
1425 */
1426 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
1427 dev_kfree_skb(skb);
1428 return NET_XMIT_SUCCESS;
1429 }
1430
1431 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
1432 orig_node, vid);
1433}
1434
1435/**
1436 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1437 * @bat_priv: the bat priv with all the soft interface information
1438 * @skb: the multicast packet to transmit
1439 * @vid: the vlan identifier
1440 *
1441 * Sends copies of a frame with multicast destination to any multicast
1442 * listener registered in the translation table. A transmission is performed
1443 * via a batman-adv unicast packet for each such destination node.
1444 *
1445 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1446 * otherwise.
1447 */
1448static int
1449batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1450 unsigned short vid)
1451{
1452 int ret = NET_XMIT_SUCCESS;
1453 struct sk_buff *newskb;
1454
1455 struct batadv_tt_orig_list_entry *orig_entry;
1456
1457 struct batadv_tt_global_entry *tt_global;
1458 const u8 *addr = eth_hdr(skb)->h_dest;
1459
1460 tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1461 if (!tt_global)
1462 goto out;
1463
1464 rcu_read_lock();
1465 hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1466 newskb = skb_copy(skb, GFP_ATOMIC);
1467 if (!newskb) {
1468 ret = NET_XMIT_DROP;
1469 break;
1470 }
1471
1472 batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
1473 orig_entry->orig_node);
1474 }
1475 rcu_read_unlock();
1476
1477 batadv_tt_global_entry_put(tt_global);
1478
1479out:
1480 return ret;
1481}
1482
1483/**
1484 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1485 * @bat_priv: the bat priv with all the soft interface information
1486 * @skb: the multicast packet to transmit
1487 * @vid: the vlan identifier
1488 *
1489 * Sends copies of a frame with multicast destination to any node with a
1490 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1491 * batman-adv unicast packet for each such destination node.
1492 *
1493 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1494 * otherwise.
1495 */
1496static int
1497batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1498 struct sk_buff *skb, unsigned short vid)
1499{
1500 struct batadv_orig_node *orig_node;
1501 int ret = NET_XMIT_SUCCESS;
1502 struct sk_buff *newskb;
1503
1504 rcu_read_lock();
1505 hlist_for_each_entry_rcu(orig_node,
1506 &bat_priv->mcast.want_all_ipv4_list,
1507 mcast_want_all_ipv4_node) {
1508 newskb = skb_copy(skb, GFP_ATOMIC);
1509 if (!newskb) {
1510 ret = NET_XMIT_DROP;
1511 break;
1512 }
1513
1514 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1515 }
1516 rcu_read_unlock();
1517 return ret;
1518}
1519
1520/**
1521 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1522 * @bat_priv: the bat priv with all the soft interface information
1523 * @skb: The multicast packet to transmit
1524 * @vid: the vlan identifier
1525 *
1526 * Sends copies of a frame with multicast destination to any node with a
1527 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1528 * batman-adv unicast packet for each such destination node.
1529 *
1530 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1531 * otherwise.
1532 */
1533static int
1534batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1535 struct sk_buff *skb, unsigned short vid)
1536{
1537 struct batadv_orig_node *orig_node;
1538 int ret = NET_XMIT_SUCCESS;
1539 struct sk_buff *newskb;
1540
1541 rcu_read_lock();
1542 hlist_for_each_entry_rcu(orig_node,
1543 &bat_priv->mcast.want_all_ipv6_list,
1544 mcast_want_all_ipv6_node) {
1545 newskb = skb_copy(skb, GFP_ATOMIC);
1546 if (!newskb) {
1547 ret = NET_XMIT_DROP;
1548 break;
1549 }
1550
1551 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1552 }
1553 rcu_read_unlock();
1554 return ret;
1555}
1556
1557/**
1558 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1559 * @bat_priv: the bat priv with all the soft interface information
1560 * @skb: the multicast packet to transmit
1561 * @vid: the vlan identifier
1562 *
1563 * Sends copies of a frame with multicast destination to any node with a
1564 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1565 * transmission is performed via a batman-adv unicast packet for each such
1566 * destination node.
1567 *
1568 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1569 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1570 */
1571static int
1572batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1573 struct sk_buff *skb, unsigned short vid)
1574{
1575 switch (ntohs(eth_hdr(skb)->h_proto)) {
1576 case ETH_P_IP:
1577 return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1578 case ETH_P_IPV6:
1579 return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1580 default:
1581 /* we shouldn't be here... */
1582 return NET_XMIT_DROP;
1583 }
1584}
1585
1586/**
1587 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
1588 * @bat_priv: the bat priv with all the soft interface information
1589 * @skb: the multicast packet to transmit
1590 * @vid: the vlan identifier
1591 *
1592 * Sends copies of a frame with multicast destination to any node with a
1593 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
1594 * batman-adv unicast packet for each such destination node.
1595 *
1596 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1597 * otherwise.
1598 */
1599static int
1600batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
1601 struct sk_buff *skb, unsigned short vid)
1602{
1603 struct batadv_orig_node *orig_node;
1604 int ret = NET_XMIT_SUCCESS;
1605 struct sk_buff *newskb;
1606
1607 rcu_read_lock();
1608 hlist_for_each_entry_rcu(orig_node,
1609 &bat_priv->mcast.want_all_rtr4_list,
1610 mcast_want_all_rtr4_node) {
1611 newskb = skb_copy(skb, GFP_ATOMIC);
1612 if (!newskb) {
1613 ret = NET_XMIT_DROP;
1614 break;
1615 }
1616
1617 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1618 }
1619 rcu_read_unlock();
1620 return ret;
1621}
1622
1623/**
1624 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
1625 * @bat_priv: the bat priv with all the soft interface information
1626 * @skb: The multicast packet to transmit
1627 * @vid: the vlan identifier
1628 *
1629 * Sends copies of a frame with multicast destination to any node with a
1630 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
1631 * batman-adv unicast packet for each such destination node.
1632 *
1633 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1634 * otherwise.
1635 */
1636static int
1637batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
1638 struct sk_buff *skb, unsigned short vid)
1639{
1640 struct batadv_orig_node *orig_node;
1641 int ret = NET_XMIT_SUCCESS;
1642 struct sk_buff *newskb;
1643
1644 rcu_read_lock();
1645 hlist_for_each_entry_rcu(orig_node,
1646 &bat_priv->mcast.want_all_rtr6_list,
1647 mcast_want_all_rtr6_node) {
1648 newskb = skb_copy(skb, GFP_ATOMIC);
1649 if (!newskb) {
1650 ret = NET_XMIT_DROP;
1651 break;
1652 }
1653
1654 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1655 }
1656 rcu_read_unlock();
1657 return ret;
1658}
1659
1660/**
1661 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
1662 * @bat_priv: the bat priv with all the soft interface information
1663 * @skb: the multicast packet to transmit
1664 * @vid: the vlan identifier
1665 *
1666 * Sends copies of a frame with multicast destination to any node with a
1667 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
1668 * transmission is performed via a batman-adv unicast packet for each such
1669 * destination node.
1670 *
1671 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1672 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1673 */
1674static int
1675batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
1676 struct sk_buff *skb, unsigned short vid)
1677{
1678 switch (ntohs(eth_hdr(skb)->h_proto)) {
1679 case ETH_P_IP:
1680 return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
1681 case ETH_P_IPV6:
1682 return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
1683 default:
1684 /* we shouldn't be here... */
1685 return NET_XMIT_DROP;
1686 }
1687}
1688
1689/**
1690 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
1691 * @bat_priv: the bat priv with all the soft interface information
1692 * @skb: the multicast packet to transmit
1693 * @vid: the vlan identifier
1694 *
1695 * Sends copies of a frame with multicast destination to any node that signaled
1696 * interest in it, that is either via the translation table or the according
1697 * want-all flags. A transmission is performed via a batman-adv unicast packet
1698 * for each such destination node.
1699 *
1700 * The given skb is consumed/freed.
1701 *
1702 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1703 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1704 */
1705int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1706 unsigned short vid)
1707{
1708 int ret;
1709
1710 ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1711 if (ret != NET_XMIT_SUCCESS) {
1712 kfree_skb(skb);
1713 return ret;
1714 }
1715
1716 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1717 if (ret != NET_XMIT_SUCCESS) {
1718 kfree_skb(skb);
1719 return ret;
1720 }
1721
1722 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
1723 if (ret != NET_XMIT_SUCCESS) {
1724 kfree_skb(skb);
1725 return ret;
1726 }
1727
1728 consume_skb(skb);
1729 return ret;
1730}
1731
1732/**
1733 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1734 * @bat_priv: the bat priv with all the soft interface information
1735 * @orig: the orig_node which multicast state might have changed of
1736 * @mcast_flags: flags indicating the new multicast state
1737 *
1738 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1739 * orig, has toggled then this method updates the counter and the list
1740 * accordingly.
1741 *
1742 * Caller needs to hold orig->mcast_handler_lock.
1743 */
1744static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1745 struct batadv_orig_node *orig,
1746 u8 mcast_flags)
1747{
1748 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1749 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1750
1751 lockdep_assert_held(&orig->mcast_handler_lock);
1752
1753 /* switched from flag unset to set */
1754 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1755 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1756 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1757
1758 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1759 /* flag checks above + mcast_handler_lock prevents this */
1760 WARN_ON(!hlist_unhashed(node));
1761
1762 hlist_add_head_rcu(node, head);
1763 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1764 /* switched from flag set to unset */
1765 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1766 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1767 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1768
1769 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1770 /* flag checks above + mcast_handler_lock prevents this */
1771 WARN_ON(hlist_unhashed(node));
1772
1773 hlist_del_init_rcu(node);
1774 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1775 }
1776}
1777
1778/**
1779 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1780 * @bat_priv: the bat priv with all the soft interface information
1781 * @orig: the orig_node whose multicast state might have changed
1782 * @mcast_flags: flags indicating the new multicast state
1783 *
1784 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1785 * toggled then this method updates the counter and the list accordingly.
1786 *
1787 * Caller needs to hold orig->mcast_handler_lock.
1788 */
1789static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1790 struct batadv_orig_node *orig,
1791 u8 mcast_flags)
1792{
1793 struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1794 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1795
1796 lockdep_assert_held(&orig->mcast_handler_lock);
1797
1798 /* switched from flag unset to set */
1799 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1800 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1801 atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1802
1803 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1804 /* flag checks above + mcast_handler_lock prevents this */
1805 WARN_ON(!hlist_unhashed(node));
1806
1807 hlist_add_head_rcu(node, head);
1808 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1809 /* switched from flag set to unset */
1810 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1811 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1812 atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1813
1814 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1815 /* flag checks above + mcast_handler_lock prevents this */
1816 WARN_ON(hlist_unhashed(node));
1817
1818 hlist_del_init_rcu(node);
1819 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1820 }
1821}
1822
1823/**
1824 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1825 * @bat_priv: the bat priv with all the soft interface information
1826 * @orig: the orig_node whose multicast state might have changed
1827 * @mcast_flags: flags indicating the new multicast state
1828 *
1829 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1830 * toggled then this method updates the counter and the list accordingly.
1831 *
1832 * Caller needs to hold orig->mcast_handler_lock.
1833 */
1834static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1835 struct batadv_orig_node *orig,
1836 u8 mcast_flags)
1837{
1838 struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1839 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1840
1841 lockdep_assert_held(&orig->mcast_handler_lock);
1842
1843 /* switched from flag unset to set */
1844 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1845 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1846 atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1847
1848 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1849 /* flag checks above + mcast_handler_lock prevents this */
1850 WARN_ON(!hlist_unhashed(node));
1851
1852 hlist_add_head_rcu(node, head);
1853 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1854 /* switched from flag set to unset */
1855 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1856 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1857 atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1858
1859 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1860 /* flag checks above + mcast_handler_lock prevents this */
1861 WARN_ON(hlist_unhashed(node));
1862
1863 hlist_del_init_rcu(node);
1864 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1865 }
1866}
1867
1868/**
1869 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
1870 * @bat_priv: the bat priv with all the soft interface information
1871 * @orig: the orig_node whose multicast state might have changed
1872 * @mcast_flags: flags indicating the new multicast state
1873 *
1874 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
1875 * toggled then this method updates the counter and the list accordingly.
1876 *
1877 * Caller needs to hold orig->mcast_handler_lock.
1878 */
1879static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
1880 struct batadv_orig_node *orig,
1881 u8 mcast_flags)
1882{
1883 struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
1884 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
1885
1886 lockdep_assert_held(&orig->mcast_handler_lock);
1887
1888 /* switched from flag set to unset */
1889 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
1890 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
1891 atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
1892
1893 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1894 /* flag checks above + mcast_handler_lock prevents this */
1895 WARN_ON(!hlist_unhashed(node));
1896
1897 hlist_add_head_rcu(node, head);
1898 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1899 /* switched from flag unset to set */
1900 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
1901 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
1902 atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
1903
1904 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1905 /* flag checks above + mcast_handler_lock prevents this */
1906 WARN_ON(hlist_unhashed(node));
1907
1908 hlist_del_init_rcu(node);
1909 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1910 }
1911}
1912
1913/**
1914 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
1915 * @bat_priv: the bat priv with all the soft interface information
1916 * @orig: the orig_node whose multicast state might have changed
1917 * @mcast_flags: flags indicating the new multicast state
1918 *
1919 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
1920 * toggled then this method updates the counter and the list accordingly.
1921 *
1922 * Caller needs to hold orig->mcast_handler_lock.
1923 */
1924static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
1925 struct batadv_orig_node *orig,
1926 u8 mcast_flags)
1927{
1928 struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
1929 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
1930
1931 lockdep_assert_held(&orig->mcast_handler_lock);
1932
1933 /* switched from flag set to unset */
1934 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
1935 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
1936 atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
1937
1938 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1939 /* flag checks above + mcast_handler_lock prevents this */
1940 WARN_ON(!hlist_unhashed(node));
1941
1942 hlist_add_head_rcu(node, head);
1943 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1944 /* switched from flag unset to set */
1945 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
1946 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
1947 atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
1948
1949 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1950 /* flag checks above + mcast_handler_lock prevents this */
1951 WARN_ON(hlist_unhashed(node));
1952
1953 hlist_del_init_rcu(node);
1954 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1955 }
1956}
1957
1958/**
1959 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
1960 * @enabled: whether the originator has multicast TVLV support enabled
1961 * @tvlv_value: tvlv buffer containing the multicast flags
1962 * @tvlv_value_len: tvlv buffer length
1963 *
1964 * Return: multicast flags for the given tvlv buffer
1965 */
1966static u8
1967batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
1968{
1969 u8 mcast_flags = BATADV_NO_FLAGS;
1970
1971 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
1972 mcast_flags = *(u8 *)tvlv_value;
1973
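	/* an originator without multicast TVLV support has to receive
	 * all IPv4 and IPv6 multicast traffic
	 */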
1974 if (!enabled) {
1975 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1976 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1977 }
1978
1979 /* remove redundant flags to avoid sending duplicate packets later */
1980 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
1981 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
1982
1983 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
1984 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
1985
1986 return mcast_flags;
1987}
1988
1989/**
1990 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
1991 * @bat_priv: the bat priv with all the soft interface information
1992 * @orig: the orig_node of the ogm
1993 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
1994 * @tvlv_value: tvlv buffer containing the multicast data
1995 * @tvlv_value_len: tvlv buffer length
1996 */
1997static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
1998 struct batadv_orig_node *orig,
1999 u8 flags,
2000 void *tvlv_value,
2001 u16 tvlv_value_len)
2002{
2003 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
2004 u8 mcast_flags;
2005
2006 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
2007 tvlv_value, tvlv_value_len);
2008
2009 spin_lock_bh(&orig->mcast_handler_lock);
2010
2011 if (orig_mcast_enabled &&
2012 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
2013 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
2014 } else if (!orig_mcast_enabled &&
2015 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
2016 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
2017 }
2018
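	/* remember that this originator's multicast state has been
	 * evaluated at least once
	 */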
2019 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
2020
2021 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
2022 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
2023 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
2024 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
2025 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
2026
2027 orig->mcast_flags = mcast_flags;
2028 spin_unlock_bh(&orig->mcast_handler_lock);
2029}
2030
2031/**
2032 * batadv_mcast_init() - initialize the multicast optimizations structures
2033 * @bat_priv: the bat priv with all the soft interface information
2034 */
2035void batadv_mcast_init(struct batadv_priv *bat_priv)
2036{
2037 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
2038 NULL, BATADV_TVLV_MCAST, 2,
2039 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
2040
2041 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
2042 batadv_mcast_start_timer(bat_priv);
2043}
2044
2045/**
2046 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
2047 * @msg: buffer for the message
2048 * @bat_priv: the bat priv with all the soft interface information
2049 *
2050 * Return: 0 or error code.
2051 */
2052int batadv_mcast_mesh_info_put(struct sk_buff *msg,
2053 struct batadv_priv *bat_priv)
2054{
2055 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
2056 u32 flags_priv = BATADV_NO_FLAGS;
2057
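	/* the querier state is only meaningful with a bridge on top of
	 * the soft interface
	 */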
2058 if (bat_priv->mcast.mla_flags.bridged) {
2059 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
2060
2061 if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
2062 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
2063 if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
2064 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
2065 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
2066 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
2067 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
2068 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
2069 }
2070
2071 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
2072 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
2073 return -EMSGSIZE;
2074
2075 return 0;
2076}
2077
2078/**
2079 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
2080 * to a netlink socket
2081 * @msg: buffer for the message
2082 * @portid: netlink port
2083 * @cb: Control block containing additional options
2084 * @orig_node: originator to dump the multicast flags of
2085 *
2086 * Return: 0 or error code.
2087 */
2088static int
2089batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
2090 struct netlink_callback *cb,
2091 struct batadv_orig_node *orig_node)
2092{
2093 void *hdr;
2094
2095 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2096 &batadv_netlink_family, NLM_F_MULTI,
2097 BATADV_CMD_GET_MCAST_FLAGS);
2098 if (!hdr)
2099 return -ENOBUFS;
2100
2101 genl_dump_check_consistent(cb, hdr);
2102
2103 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
2104 orig_node->orig)) {
2105 genlmsg_cancel(msg, hdr);
2106 return -EMSGSIZE;
2107 }
2108
2109 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2110 &orig_node->capabilities)) {
2111 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
2112 orig_node->mcast_flags)) {
2113 genlmsg_cancel(msg, hdr);
2114 return -EMSGSIZE;
2115 }
2116 }
2117
2118 genlmsg_end(msg, hdr);
2119 return 0;
2120}
2121
2122/**
2123 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
2124 * table to a netlink socket
2125 * @msg: buffer for the message
2126 * @portid: netlink port
2127 * @cb: Control block containing additional options
2128 * @hash: hash to dump
2129 * @bucket: bucket index to dump
2130 * @idx_skip: How many entries to skip
2131 *
2132 * Return: 0 or error code.
2133 */
2134static int
2135batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
2136 struct netlink_callback *cb,
2137 struct batadv_hashtable *hash,
2138 unsigned int bucket, long *idx_skip)
2139{
2140 struct batadv_orig_node *orig_node;
2141 long idx = 0;
2142
2143 spin_lock_bh(&hash->list_locks[bucket]);
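	/* derive a non-zero dump sequence number from the hash generation
	 * so that genl_dump_check_consistent() can flag interrupted dumps
	 */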
2144 cb->seq = atomic_read(&hash->generation) << 1 | 1;
2145
2146 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
2147 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2148 &orig_node->capa_initialized))
2149 continue;
2150
2151 if (idx < *idx_skip)
2152 goto skip;
2153
2154 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
2155 spin_unlock_bh(&hash->list_locks[bucket]);
2156 *idx_skip = idx;
2157
2158 return -EMSGSIZE;
2159 }
2160
2161skip:
2162 idx++;
2163 }
2164 spin_unlock_bh(&hash->list_locks[bucket]);
2165
2166 return 0;
2167}
2168
2169/**
2170 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2171 * @msg: buffer for the message
2172 * @portid: netlink port
2173 * @cb: Control block containing additional options
2174 * @bat_priv: the bat priv with all the soft interface information
2175 * @bucket: current bucket to dump
2176 * @idx: index in current bucket to the next entry to dump
2177 *
2178 * Return: 0 or error code.
2179 */
2180static int
2181__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2182 struct netlink_callback *cb,
2183 struct batadv_priv *bat_priv, long *bucket, long *idx)
2184{
2185 struct batadv_hashtable *hash = bat_priv->orig_hash;
2186 long bucket_tmp = *bucket;
2187 long idx_tmp = *idx;
2188
2189 while (bucket_tmp < hash->size) {
2190 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2191 bucket_tmp, &idx_tmp))
2192 break;
2193
2194 bucket_tmp++;
2195 idx_tmp = 0;
2196 }
2197
2198 *bucket = bucket_tmp;
2199 *idx = idx_tmp;
2200
2201 return msg->len;
2202}
2203
2204/**
2205 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
2206 * callback
2207 * @cb: netlink callback structure
2208 * @primary_if: the primary interface pointer to return the result in
2209 *
2210 * Return: 0 or error code.
2211 */
2212static int
2213batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
2214 struct batadv_hard_iface **primary_if)
2215{
2216 struct batadv_hard_iface *hard_iface = NULL;
2217 struct net *net = sock_net(cb->skb->sk);
2218 struct net_device *soft_iface;
2219 struct batadv_priv *bat_priv;
2220 int ifindex;
2221 int ret = 0;
2222
2223 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
2224 if (!ifindex)
2225 return -EINVAL;
2226
2227 soft_iface = dev_get_by_index(net, ifindex);
2228 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2229 ret = -ENODEV;
2230 goto out;
2231 }
2232
2233 bat_priv = netdev_priv(soft_iface);
2234
2235 hard_iface = batadv_primary_if_get_selected(bat_priv);
2236 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
2237 ret = -ENOENT;
2238 goto out;
2239 }
2240
2241out:
2242 if (soft_iface)
2243 dev_put(soft_iface);
2244
2245 if (!ret && primary_if)
2246 *primary_if = hard_iface;
2247 else if (hard_iface)
2248 batadv_hardif_put(hard_iface);
2249
2250 return ret;
2251}
2252
2253/**
2254 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2255 * @msg: buffer for the message
2256 * @cb: callback structure containing arguments
2257 *
2258 * Return: message length.
2259 */
2260int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
2261{
2262 struct batadv_hard_iface *primary_if = NULL;
2263 int portid = NETLINK_CB(cb->skb).portid;
2264 struct batadv_priv *bat_priv;
2265 long *bucket = &cb->args[0];
2266 long *idx = &cb->args[1];
2267 int ret;
2268
2269 ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
2270 if (ret)
2271 return ret;
2272
2273 bat_priv = netdev_priv(primary_if->soft_iface);
2274 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
2275
2276 batadv_hardif_put(primary_if);
2277 return ret;
2278}
2279
2280/**
2281 * batadv_mcast_free() - free the multicast optimizations structures
2282 * @bat_priv: the bat priv with all the soft interface information
2283 */
2284void batadv_mcast_free(struct batadv_priv *bat_priv)
2285{
2286 cancel_delayed_work_sync(&bat_priv->mcast.work);
2287
2288 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2289 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2290
2291 /* safely calling outside of worker, as worker was canceled above */
2292 batadv_mcast_mla_tt_retract(bat_priv, NULL);
2293}
2294
2295/**
2296 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
2297 * @orig: the originator which is going to get purged
2298 */
2299void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2300{
2301 struct batadv_priv *bat_priv = orig->bat_priv;
2302
2303 spin_lock_bh(&orig->mcast_handler_lock);
2304
2305 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2306 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2307 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
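	/* unlike above, a set BATADV_MCAST_WANT_NO_RTR* flag is what takes
	 * the originator off the corresponding want-all-rtr list
	 */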
2308 batadv_mcast_want_rtr4_update(bat_priv, orig,
2309 BATADV_MCAST_WANT_NO_RTR4);
2310 batadv_mcast_want_rtr6_update(bat_priv, orig,
2311 BATADV_MCAST_WANT_NO_RTR6);
2312
2313 spin_unlock_bh(&orig->mcast_handler_lock);
2314}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) B.A.T.M.A.N. contributors:
3 *
4 * Linus Lüssing
5 */
6
7#include "multicast.h"
8#include "main.h"
9
10#include <linux/atomic.h>
11#include <linux/bitops.h>
12#include <linux/bug.h>
13#include <linux/byteorder/generic.h>
14#include <linux/container_of.h>
15#include <linux/errno.h>
16#include <linux/etherdevice.h>
17#include <linux/gfp.h>
18#include <linux/icmpv6.h>
19#include <linux/if_bridge.h>
20#include <linux/if_ether.h>
21#include <linux/igmp.h>
22#include <linux/in.h>
23#include <linux/in6.h>
24#include <linux/inetdevice.h>
25#include <linux/ip.h>
26#include <linux/ipv6.h>
27#include <linux/jiffies.h>
28#include <linux/list.h>
29#include <linux/lockdep.h>
30#include <linux/netdevice.h>
31#include <linux/netlink.h>
32#include <linux/printk.h>
33#include <linux/rculist.h>
34#include <linux/rcupdate.h>
35#include <linux/skbuff.h>
36#include <linux/slab.h>
37#include <linux/spinlock.h>
38#include <linux/sprintf.h>
39#include <linux/stddef.h>
40#include <linux/string.h>
41#include <linux/types.h>
42#include <linux/workqueue.h>
43#include <net/addrconf.h>
44#include <net/genetlink.h>
45#include <net/if_inet6.h>
46#include <net/ip.h>
47#include <net/ipv6.h>
48#include <net/netlink.h>
49#include <net/sock.h>
50#include <uapi/linux/batadv_packet.h>
51#include <uapi/linux/batman_adv.h>
52
53#include "bridge_loop_avoidance.h"
54#include "hard-interface.h"
55#include "hash.h"
56#include "log.h"
57#include "netlink.h"
58#include "send.h"
59#include "soft-interface.h"
60#include "translation-table.h"
61#include "tvlv.h"
62
63static void batadv_mcast_mla_update(struct work_struct *work);
64
65/**
66 * batadv_mcast_start_timer() - schedule the multicast periodic worker
67 * @bat_priv: the bat priv with all the soft interface information
68 */
69static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
70{
71 queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
72 msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
73}
74
75/**
76 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
77 * @soft_iface: netdev struct of the mesh interface
78 *
79 * If the given soft interface has a bridge on top then the refcount
80 * of the according net device is increased.
81 *
82 * Return: NULL if no such bridge exists. Otherwise the net device of the
83 * bridge.
84 */
85static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
86{
87 struct net_device *upper = soft_iface;
88
89 rcu_read_lock();
90 do {
91 upper = netdev_master_upper_dev_get_rcu(upper);
92 } while (upper && !netif_is_bridge_master(upper));
93
94 dev_hold(upper);
95 rcu_read_unlock();
96
97 return upper;
98}
99
100/**
101 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
102 * node for IPv4
103 * @dev: the interface to check
104 *
105 * Checks the presence of an IPv4 multicast router on this node.
106 *
107 * Caller needs to hold rcu read lock.
108 *
109 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
110 */
111static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
112{
113 struct in_device *in_dev = __in_dev_get_rcu(dev);
114
115 if (in_dev && IN_DEV_MFORWARD(in_dev))
116 return BATADV_NO_FLAGS;
117 else
118 return BATADV_MCAST_WANT_NO_RTR4;
119}
120
121/**
122 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
123 * node for IPv6
124 * @dev: the interface to check
125 *
126 * Checks the presence of an IPv6 multicast router on this node.
127 *
128 * Caller needs to hold rcu read lock.
129 *
130 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
131 */
132#if IS_ENABLED(CONFIG_IPV6_MROUTE)
133static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
134{
135 struct inet6_dev *in6_dev = __in6_dev_get(dev);
136
137 if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
138 return BATADV_NO_FLAGS;
139 else
140 return BATADV_MCAST_WANT_NO_RTR6;
141}
142#else
143static inline u8
144batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
145{
146 return BATADV_MCAST_WANT_NO_RTR6;
147}
148#endif
149
150/**
151 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
152 * @bat_priv: the bat priv with all the soft interface information
153 * @bridge: bridge interface on top of the soft_iface if present,
154 * otherwise pass NULL
155 *
156 * Checks the presence of IPv4 and IPv6 multicast routers on this
157 * node.
158 *
159 * Return:
160 * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
161 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
162 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
163 * The former two OR'd: no multicast router is present
164 */
165static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
166 struct net_device *bridge)
167{
168 struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
169 u8 flags = BATADV_NO_FLAGS;
170
171 rcu_read_lock();
172
173 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
174 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
175
176 rcu_read_unlock();
177
178 return flags;
179}
180
181/**
182 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
183 * @bat_priv: the bat priv with all the soft interface information
184 * @bridge: bridge interface on top of the soft_iface if present,
185 * otherwise pass NULL
186 *
187 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
188 *
189 * Return:
190 * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
191 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
192 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
193 * The former two OR'd: no multicast router is present
194 */
195static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
196 struct net_device *bridge)
197{
198 struct net_device *dev = bat_priv->soft_iface;
199 u8 flags = BATADV_NO_FLAGS;
200
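	/* without a bridge there cannot be a multicast router behind one */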
201 if (!bridge)
202 return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
203
204 if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
205 flags |= BATADV_MCAST_WANT_NO_RTR4;
206 if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
207 flags |= BATADV_MCAST_WANT_NO_RTR6;
208
209 return flags;
210}
211
212/**
213 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
214 * @bat_priv: the bat priv with all the soft interface information
215 * @bridge: bridge interface on top of the soft_iface if present,
216 * otherwise pass NULL
217 *
218 * Checks the presence of IPv4 and IPv6 multicast routers on this
219 * node or behind its bridge.
220 *
221 * Return:
222 * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
223 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
224 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
225 * The former two OR'd: no multicast router is present
226 */
227static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
228 struct net_device *bridge)
229{
230 u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
231
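	/* a WANT_NO_RTR* flag only survives if neither this node itself
	 * nor its bridge segment has a multicast router for that protocol
	 */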
232 flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
233 flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
234
235 return flags;
236}
237
238/**
239 * batadv_mcast_mla_forw_flags_get() - get multicast forwarding flags
240 * @bat_priv: the bat priv with all the soft interface information
241 *
242 * Checks if all active hard interfaces have an MTU larger than or equal to
243 * 1280 bytes (IPv6 minimum MTU).
244 *
245 * Return: BATADV_MCAST_HAVE_MC_PTYPE_CAPA if yes, BATADV_NO_FLAGS otherwise.
246 */
247static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv)
248{
249 const struct batadv_hard_iface *hard_iface;
250
251 rcu_read_lock();
252 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
253 if (hard_iface->if_status != BATADV_IF_ACTIVE)
254 continue;
255
256 if (hard_iface->soft_iface != bat_priv->soft_iface)
257 continue;
258
259 if (hard_iface->net_dev->mtu < IPV6_MIN_MTU) {
260 rcu_read_unlock();
261 return BATADV_NO_FLAGS;
262 }
263 }
264 rcu_read_unlock();
265
266 return BATADV_MCAST_HAVE_MC_PTYPE_CAPA;
267}
268
269/**
270 * batadv_mcast_mla_flags_get() - get the new multicast flags
271 * @bat_priv: the bat priv with all the soft interface information
272 *
273 * Return: A set of flags for the current/next TVLV, querier and
274 * bridge state.
275 */
276static struct batadv_mcast_mla_flags
277batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
278{
279 struct net_device *dev = bat_priv->soft_iface;
280 struct batadv_mcast_querier_state *qr4, *qr6;
281 struct batadv_mcast_mla_flags mla_flags;
282 struct net_device *bridge;
283
284 bridge = batadv_mcast_get_bridge(dev);
285
286 memset(&mla_flags, 0, sizeof(mla_flags));
287 mla_flags.enabled = 1;
288 mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
289 bridge);
290 mla_flags.tvlv_flags |= batadv_mcast_mla_forw_flags_get(bat_priv);
291
292 if (!bridge)
293 return mla_flags;
294
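	/* only the existence of the bridge matters from here on, the
	 * querier checks below operate on the soft interface
	 */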
295 dev_put(bridge);
296
297 mla_flags.bridged = 1;
298 qr4 = &mla_flags.querier_ipv4;
299 qr6 = &mla_flags.querier_ipv6;
300
301 if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
302 pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
303
304 qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
305 qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
306
307 qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
308 qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
309
310 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
311
312 /* 1) If no querier exists at all, then multicast listeners on
313 * our local TT clients behind the bridge will keep silent.
314 * 2) If the selected querier is on one of our local TT clients,
315 * behind the bridge, then this querier might shadow multicast
316 * listeners on our local TT clients, behind this bridge.
317 *
318	 * In both cases, we will signal to other batman-adv nodes that
319	 * we need all multicast traffic of the corresponding protocol.
320 */
321 if (!qr4->exists || qr4->shadowing) {
322 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
323 mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
324 }
325
326 if (!qr6->exists || qr6->shadowing) {
327 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
328 mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
329 }
330
331 return mla_flags;
332}
333
334/**
335 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
336 * @mcast_addr: the multicast address to check
337 * @mcast_list: the list with multicast addresses to search in
338 *
339 * Return: true if the given address is already in the given list.
340 * Otherwise returns false.
341 */
342static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
343 struct hlist_head *mcast_list)
344{
345 struct batadv_hw_addr *mcast_entry;
346
347 hlist_for_each_entry(mcast_entry, mcast_list, list)
348 if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
349 return true;
350
351 return false;
352}
353
354/**
355 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
356 * @dev: the device to collect multicast addresses from
357 * @mcast_list: a list to put found addresses into
358 * @flags: flags indicating the new multicast state
359 *
360 * Collects multicast addresses of IPv4 multicast listeners residing
361 * on this kernel on the given soft interface, dev, in
362 * the given mcast_list. In general, multicast listeners provided by
363 * your multicast receiving applications run directly on this node.
364 *
365 * Return: -ENOMEM on memory allocation error or the number of
366 * items added to the mcast_list otherwise.
367 */
368static int
369batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
370 struct hlist_head *mcast_list,
371 struct batadv_mcast_mla_flags *flags)
372{
373 struct batadv_hw_addr *new;
374 struct in_device *in_dev;
375 u8 mcast_addr[ETH_ALEN];
376 struct ip_mc_list *pmc;
377 int ret = 0;
378
379 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
380 return 0;
381
382 rcu_read_lock();
383
384 in_dev = __in_dev_get_rcu(dev);
385 if (!in_dev) {
386 rcu_read_unlock();
387 return 0;
388 }
389
390 for (pmc = rcu_dereference(in_dev->mc_list); pmc;
391 pmc = rcu_dereference(pmc->next_rcu)) {
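		/* skip groups which are already covered by a want-all flag:
		 * link-local ones via WANT_ALL_UNSNOOPABLES, routable ones
		 * via an unset WANT_NO_RTR4
		 */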
392 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
393 ipv4_is_local_multicast(pmc->multiaddr))
394 continue;
395
396 if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
397 !ipv4_is_local_multicast(pmc->multiaddr))
398 continue;
399
400 ip_eth_mc_map(pmc->multiaddr, mcast_addr);
401
402 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
403 continue;
404
405 new = kmalloc(sizeof(*new), GFP_ATOMIC);
406 if (!new) {
407 ret = -ENOMEM;
408 break;
409 }
410
411 ether_addr_copy(new->addr, mcast_addr);
412 hlist_add_head(&new->list, mcast_list);
413 ret++;
414 }
415 rcu_read_unlock();
416
417 return ret;
418}
419
420/**
421 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
422 * @dev: the device to collect multicast addresses from
423 * @mcast_list: a list to put found addresses into
424 * @flags: flags indicating the new multicast state
425 *
426 * Collects multicast addresses of IPv6 multicast listeners residing
427 * on this kernel on the given soft interface, dev, in
428 * the given mcast_list. In general, multicast listeners provided by
429 * your multicast receiving applications run directly on this node.
430 *
431 * Return: -ENOMEM on memory allocation error or the number of
432 * items added to the mcast_list otherwise.
433 */
434#if IS_ENABLED(CONFIG_IPV6)
435static int
436batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
437 struct hlist_head *mcast_list,
438 struct batadv_mcast_mla_flags *flags)
439{
440 struct batadv_hw_addr *new;
441 struct inet6_dev *in6_dev;
442 u8 mcast_addr[ETH_ALEN];
443 struct ifmcaddr6 *pmc6;
444 int ret = 0;
445
446 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
447 return 0;
448
449 rcu_read_lock();
450
451 in6_dev = __in6_dev_get(dev);
452 if (!in6_dev) {
453 rcu_read_unlock();
454 return 0;
455 }
456
457 for (pmc6 = rcu_dereference(in6_dev->mc_list);
458 pmc6;
459 pmc6 = rcu_dereference(pmc6->next)) {
460 if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
461 IPV6_ADDR_SCOPE_LINKLOCAL)
462 continue;
463
464 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
465 ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
466 continue;
467
468 if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
469 IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
470 IPV6_ADDR_SCOPE_LINKLOCAL)
471 continue;
472
473 ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
474
475 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
476 continue;
477
478 new = kmalloc(sizeof(*new), GFP_ATOMIC);
479 if (!new) {
480 ret = -ENOMEM;
481 break;
482 }
483
484 ether_addr_copy(new->addr, mcast_addr);
485 hlist_add_head(&new->list, mcast_list);
486 ret++;
487 }
488 rcu_read_unlock();
489
490 return ret;
491}
492#else
493static inline int
494batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
495 struct hlist_head *mcast_list,
496 struct batadv_mcast_mla_flags *flags)
497{
498 return 0;
499}
500#endif
501
502/**
503 * batadv_mcast_mla_softif_get() - get softif multicast listeners
504 * @dev: the device to collect multicast addresses from
505 * @mcast_list: a list to put found addresses into
506 * @flags: flags indicating the new multicast state
507 *
508 * Collects multicast addresses of multicast listeners residing
509 * on this kernel on the given soft interface, dev, in
510 * the given mcast_list. In general, multicast listeners provided by
511 * your multicast receiving applications run directly on this node.
512 *
513 * If there is a bridge interface on top of dev, collect from that one
514 * instead. Just like with IP addresses and routes, multicast listeners
515 * will(/should) register to the bridge interface instead of an
516 * enslaved bat0.
517 *
518 * Return: -ENOMEM on memory allocation error or the number of
519 * items added to the mcast_list otherwise.
520 */
521static int
522batadv_mcast_mla_softif_get(struct net_device *dev,
523 struct hlist_head *mcast_list,
524 struct batadv_mcast_mla_flags *flags)
525{
526 struct net_device *bridge = batadv_mcast_get_bridge(dev);
527 int ret4, ret6 = 0;
528
529 if (bridge)
530 dev = bridge;
531
532 ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
533 if (ret4 < 0)
534 goto out;
535
536 ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
537 if (ret6 < 0) {
538 ret4 = 0;
539 goto out;
540 }
541
542out:
543 dev_put(bridge);
544
545 return ret4 + ret6;
546}
547
548/**
549 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
550 * @dst: destination to write to - a multicast MAC address
551 * @src: source to read from - a multicast IP address
552 *
553 * Converts a given multicast IPv4/IPv6 address from a bridge
554 * to its matching multicast MAC address and copies it into the given
555 * destination buffer.
556 *
557 * Caller needs to make sure the destination buffer can hold
558 * at least ETH_ALEN bytes.
559 */
560static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
561{
562 if (src->proto == htons(ETH_P_IP))
563 ip_eth_mc_map(src->dst.ip4, dst);
564#if IS_ENABLED(CONFIG_IPV6)
565 else if (src->proto == htons(ETH_P_IPV6))
566 ipv6_eth_mc_map(&src->dst.ip6, dst);
567#endif
568 else
569 eth_zero_addr(dst);
570}
571
572/**
573 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
574 * @dev: a bridge slave whose bridge to collect multicast addresses from
575 * @mcast_list: a list to put found addresses into
576 * @flags: flags indicating the new multicast state
577 *
578 * Collects multicast addresses of multicast listeners residing
579 * on foreign, non-mesh devices which we gave access to our mesh via
580 * a bridge on top of the given soft interface, dev, in the given
581 * mcast_list.
582 *
583 * Return: -ENOMEM on memory allocation error or the number of
584 * items added to the mcast_list otherwise.
585 */
586static int batadv_mcast_mla_bridge_get(struct net_device *dev,
587 struct hlist_head *mcast_list,
588 struct batadv_mcast_mla_flags *flags)
589{
590 struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
591 struct br_ip_list *br_ip_entry, *tmp;
592 u8 tvlv_flags = flags->tvlv_flags;
593 struct batadv_hw_addr *new;
594 u8 mcast_addr[ETH_ALEN];
595 int ret;
596
597 /* we don't need to detect these devices/listeners, the IGMP/MLD
598 * snooping code of the Linux bridge already does that for us
599 */
600 ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
601 if (ret < 0)
602 goto out;
603
604 list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
605 if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
606 if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
607 continue;
608
609 if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
610 ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
611 continue;
612
613 if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
614 !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
615 continue;
616 }
617
618#if IS_ENABLED(CONFIG_IPV6)
619 if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
620 if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
621 continue;
622
623 if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
624 ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
625 continue;
626
627 if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
628 IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
629 IPV6_ADDR_SCOPE_LINKLOCAL)
630 continue;
631 }
632#endif
633
634 batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
635 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
636 continue;
637
638 new = kmalloc(sizeof(*new), GFP_ATOMIC);
639 if (!new) {
640 ret = -ENOMEM;
641 break;
642 }
643
644 ether_addr_copy(new->addr, mcast_addr);
645 hlist_add_head(&new->list, mcast_list);
646 }
647
648out:
649 list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
650 list_del(&br_ip_entry->list);
651 kfree(br_ip_entry);
652 }
653
654 return ret;
655}
656
657/**
658 * batadv_mcast_mla_list_free() - free a list of multicast addresses
659 * @mcast_list: the list to free
660 *
661 * Removes and frees all items in the given mcast_list.
662 */
663static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
664{
665 struct batadv_hw_addr *mcast_entry;
666 struct hlist_node *tmp;
667
668 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
669 hlist_del(&mcast_entry->list);
670 kfree(mcast_entry);
671 }
672}
673
674/**
675 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
676 * @bat_priv: the bat priv with all the soft interface information
677 * @mcast_list: a list of addresses which should _not_ be removed
678 *
679 * Retracts the announcement of any multicast listener from the
680 * translation table except the ones listed in the given mcast_list.
681 *
682 * If mcast_list is NULL then all are retracted.
683 */
684static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
685 struct hlist_head *mcast_list)
686{
687 struct batadv_hw_addr *mcast_entry;
688 struct hlist_node *tmp;
689
690 hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
691 list) {
692 if (mcast_list &&
693 batadv_mcast_mla_is_duplicate(mcast_entry->addr,
694 mcast_list))
695 continue;
696
697 batadv_tt_local_remove(bat_priv, mcast_entry->addr,
698 BATADV_NO_FLAGS,
699 "mcast TT outdated", false);
700
701 hlist_del(&mcast_entry->list);
702 kfree(mcast_entry);
703 }
704}
705
706/**
707 * batadv_mcast_mla_tt_add() - add multicast listener announcements
708 * @bat_priv: the bat priv with all the soft interface information
709 * @mcast_list: a list of addresses which are going to get added
710 *
711 * Adds multicast listener announcements from the given mcast_list to the
712 * translation table if they have not been added yet.
713 */
714static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
715 struct hlist_head *mcast_list)
716{
717 struct batadv_hw_addr *mcast_entry;
718 struct hlist_node *tmp;
719
720 if (!mcast_list)
721 return;
722
723 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
724 if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
725 &bat_priv->mcast.mla_list))
726 continue;
727
728 if (!batadv_tt_local_add(bat_priv->soft_iface,
729 mcast_entry->addr, BATADV_NO_FLAGS,
730 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
731 continue;
732
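		/* announced successfully - move the entry from the temporary
		 * list to the tracked mla_list
		 */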
733 hlist_del(&mcast_entry->list);
734 hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
735 }
736}
737
738/**
739 * batadv_mcast_querier_log() - debug output regarding the querier status on
740 * link
741 * @bat_priv: the bat priv with all the soft interface information
742 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
743 * @old_state: the previous querier state on our link
744 * @new_state: the new querier state on our link
745 *
746 * Outputs debug messages to the logging facility with log level 'mcast'
747 * regarding changes to the querier status on the link which are relevant
748 * to our multicast optimizations.
749 *
750 * Usually this is about whether a querier appeared or vanished in
751 * our mesh or whether the querier is in the suboptimal position of being
752 * behind our local bridge segment: Snooping switches will directly
753 * forward listener reports to the querier, therefore batman-adv and
754 * the bridge will potentially not see these listeners - the querier is
755 * potentially shadowing listeners from us then.
756 *
757 * This is only interesting for nodes with a bridge on top of their
758 * soft interface.
759 */
760static void
761batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
762 struct batadv_mcast_querier_state *old_state,
763 struct batadv_mcast_querier_state *new_state)
764{
765 if (!old_state->exists && new_state->exists)
766 batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
767 str_proto);
768 else if (old_state->exists && !new_state->exists)
769 batadv_info(bat_priv->soft_iface,
770 "%s Querier disappeared - multicast optimizations disabled\n",
771 str_proto);
772 else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
773 batadv_info(bat_priv->soft_iface,
774 "No %s Querier present - multicast optimizations disabled\n",
775 str_proto);
776
777 if (new_state->exists) {
778 if ((!old_state->shadowing && new_state->shadowing) ||
779 (!old_state->exists && new_state->shadowing))
780 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
781 "%s Querier is behind our bridged segment: Might shadow listeners\n",
782 str_proto);
783 else if (old_state->shadowing && !new_state->shadowing)
784 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
785 "%s Querier is not behind our bridged segment\n",
786 str_proto);
787 }
788}
789
790/**
791 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
792 * setups
793 * @bat_priv: the bat priv with all the soft interface information
794 * @new_flags: flags indicating the new multicast state
795 *
796 * If no bridges are ever used on this node, then this function does nothing.
797 *
798 * Otherwise this function outputs debug information to the 'mcast' log level
799 * which might be relevant to our multicast optimizations.
800 *
801 * More precisely, it outputs information when a bridge interface is added to
802 * or removed from a soft interface. And when a bridge is present, it further
803 * outputs information about the querier state which is relevant for the
804 * multicast flags this node is going to set.
805 */
806static void
807batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
808 struct batadv_mcast_mla_flags *new_flags)
809{
810 struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
811
812 if (!old_flags->bridged && new_flags->bridged)
813 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
814 "Bridge added: Setting Unsnoopables(U)-flag\n");
815 else if (old_flags->bridged && !new_flags->bridged)
816 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
817 "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
818
819 if (new_flags->bridged) {
820 batadv_mcast_querier_log(bat_priv, "IGMP",
821 &old_flags->querier_ipv4,
822 &new_flags->querier_ipv4);
823 batadv_mcast_querier_log(bat_priv, "MLD",
824 &old_flags->querier_ipv6,
825 &new_flags->querier_ipv6);
826 }
827}
828
829/**
830 * batadv_mcast_flags_log() - output debug information about mcast flag changes
831 * @bat_priv: the bat priv with all the soft interface information
832 * @flags: TVLV flags indicating the new multicast state
833 *
834 * Whenever the multicast TVLV flags this node announces change, this function
835 * should be used to notify userspace about the change.
836 */
837static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
838{
839 bool old_enabled = bat_priv->mcast.mla_flags.enabled;
840 u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
841 char str_old_flags[] = "[.... . .]";
842
843 sprintf(str_old_flags, "[%c%c%c%s%s%c]",
844 (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
845 (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
846 (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
847 !(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
848 !(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
849 !(old_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');
850
851 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
852 "Changing multicast flags from '%s' to '[%c%c%c%s%s%c]'\n",
853 old_enabled ? str_old_flags : "<undefined>",
854 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
855 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
856 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
857 !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
858 !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
859 !(flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');
860}
861
862/**
863 * batadv_mcast_mla_flags_update() - update multicast flags
864 * @bat_priv: the bat priv with all the soft interface information
865 * @flags: flags indicating the new multicast state
866 *
867 * Updates the own multicast tvlv with our current multicast related settings,
868 * capabilities and inabilities.
869 */
870static void
871batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
872 struct batadv_mcast_mla_flags *flags)
873{
874 struct batadv_tvlv_mcast_data mcast_data;
875
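	/* nothing changed since the last update, keep the current TVLV */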
876 if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
877 return;
878
879 batadv_mcast_bridge_log(bat_priv, flags);
880 batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
881
882 mcast_data.flags = flags->tvlv_flags;
883 memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
884
885 batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
886 &mcast_data, sizeof(mcast_data));
887
888 bat_priv->mcast.mla_flags = *flags;
889}
890
891/**
892 * __batadv_mcast_mla_update() - update the own MLAs
893 * @bat_priv: the bat priv with all the soft interface information
894 *
895 * Updates the own multicast listener announcements in the translation
896 * table as well as the own, announced multicast tvlv container.
897 *
898 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
899 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
900 * ensured by the non-parallel execution of the worker this function
901 * belongs to.
902 */
903static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
904{
905 struct net_device *soft_iface = bat_priv->soft_iface;
906 struct hlist_head mcast_list = HLIST_HEAD_INIT;
907 struct batadv_mcast_mla_flags flags;
908 int ret;
909
910 flags = batadv_mcast_mla_flags_get(bat_priv);
911
912 ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
913 if (ret < 0)
914 goto out;
915
916 ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
917 if (ret < 0)
918 goto out;
919
920 spin_lock(&bat_priv->mcast.mla_lock);
921 batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
922 batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
923 batadv_mcast_mla_flags_update(bat_priv, &flags);
924 spin_unlock(&bat_priv->mcast.mla_lock);
925
926out:
927 batadv_mcast_mla_list_free(&mcast_list);
928}
929
930/**
931 * batadv_mcast_mla_update() - update the own MLAs
932 * @work: kernel work struct
933 *
934 * Updates the own multicast listener announcements in the translation
935 * table as well as the own, announced multicast tvlv container.
936 *
937 * In the end, reschedules the work timer.
938 */
939static void batadv_mcast_mla_update(struct work_struct *work)
940{
941 struct delayed_work *delayed_work;
942 struct batadv_priv_mcast *priv_mcast;
943 struct batadv_priv *bat_priv;
944
945 delayed_work = to_delayed_work(work);
946 priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
947 bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
948
949 __batadv_mcast_mla_update(bat_priv);
950 batadv_mcast_start_timer(bat_priv);
951}
952
953/**
954 * batadv_mcast_is_report_ipv4() - check for IGMP reports
955 * @skb: the ethernet frame destined for the mesh
956 *
957 * This call might reallocate skb data.
958 *
959 * Checks whether the given frame is a valid IGMP report.
960 *
961 * Return: If so then true, otherwise false.
962 */
963static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
964{
965 if (ip_mc_check_igmp(skb) < 0)
966 return false;
967
968 switch (igmp_hdr(skb)->type) {
969 case IGMP_HOST_MEMBERSHIP_REPORT:
970 case IGMPV2_HOST_MEMBERSHIP_REPORT:
971 case IGMPV3_HOST_MEMBERSHIP_REPORT:
972 return true;
973 }
974
975 return false;
976}
977
978/**
979 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
980 * potential
981 * @bat_priv: the bat priv with all the soft interface information
982 * @skb: the IPv4 packet to check
983 * @is_unsnoopable: stores whether the destination is unsnoopable
984 * @is_routable: stores whether the destination is routable
985 *
986 * Checks whether the given IPv4 packet has the potential to be forwarded with a
987 * mode more optimal than classic flooding.
988 *
989 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
990 * allocation failure.
991 */
992static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
993 struct sk_buff *skb,
994 bool *is_unsnoopable,
995 int *is_routable)
996{
997 struct iphdr *iphdr;
998
999 /* We might fail due to out-of-memory -> drop it */
1000 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
1001 return -ENOMEM;
1002
1003 if (batadv_mcast_is_report_ipv4(skb))
1004 return -EINVAL;
1005
1006 iphdr = ip_hdr(skb);
1007
1008 /* link-local multicast listeners behind a bridge are
1009 * not snoopable (see RFC4541, section 2.1.2.2)
1010 */
1011 if (ipv4_is_local_multicast(iphdr->daddr))
1012 *is_unsnoopable = true;
1013 else
1014 *is_routable = ETH_P_IP;
1015
1016 return 0;
1017}
1018
1019/**
1020 * batadv_mcast_is_report_ipv6() - check for MLD reports
1021 * @skb: the ethernet frame destined for the mesh
1022 *
1023 * This call might reallocate skb data.
1024 *
1025 * Checks whether the given frame is a valid MLD report.
1026 *
1027 * Return: If so then true, otherwise false.
1028 */
1029static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
1030{
1031 if (ipv6_mc_check_mld(skb) < 0)
1032 return false;
1033
1034 switch (icmp6_hdr(skb)->icmp6_type) {
1035 case ICMPV6_MGM_REPORT:
1036 case ICMPV6_MLD2_REPORT:
1037 return true;
1038 }
1039
1040 return false;
1041}
1042
1043/**
1044 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
1045 * potential
1046 * @bat_priv: the bat priv with all the soft interface information
1047 * @skb: the IPv6 packet to check
1048 * @is_unsnoopable: stores whether the destination is unsnoopable
1049 * @is_routable: stores whether the destination is routable
1050 *
1051 * Checks whether the given IPv6 packet has the potential to be forwarded with a
1052 * mode more optimal than classic flooding.
1053 *
1054 * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory.
1055 */
1056static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
1057 struct sk_buff *skb,
1058 bool *is_unsnoopable,
1059 int *is_routable)
1060{
1061 struct ipv6hdr *ip6hdr;
1062
1063 /* We might fail due to out-of-memory -> drop it */
1064 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
1065 return -ENOMEM;
1066
1067 if (batadv_mcast_is_report_ipv6(skb))
1068 return -EINVAL;
1069
1070 ip6hdr = ipv6_hdr(skb);
1071
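	/* scopes smaller than link-local are confined to this node and
	 * are not covered by the multicast optimizations
	 */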
1072 if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1073 return -EINVAL;
1074
1075 /* link-local-all-nodes multicast listeners behind a bridge are
1076 * not snoopable (see RFC4541, section 3, paragraph 3)
1077 */
1078 if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
1079 *is_unsnoopable = true;
1080 else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
1081 *is_routable = ETH_P_IPV6;
1082
1083 return 0;
1084}
1085
1086/**
1087 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
1088 * @bat_priv: the bat priv with all the soft interface information
1089 * @skb: the multicast frame to check
1090 * @is_unsnoopable: stores whether the destination is unsnoopable
1091 * @is_routable: stores whether the destination is routable
1092 *
1093 * Checks whether the given multicast ethernet frame has the potential to be
1094 * forwarded with a mode more optimal than classic flooding.
1095 *
1096 * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory.
1097 */
1098static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
1099 struct sk_buff *skb,
1100 bool *is_unsnoopable,
1101 int *is_routable)
1102{
1103 struct ethhdr *ethhdr = eth_hdr(skb);
1104
1105 if (!atomic_read(&bat_priv->multicast_mode))
1106 return -EINVAL;
1107
1108 switch (ntohs(ethhdr->h_proto)) {
1109 case ETH_P_IP:
1110 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
1111 is_unsnoopable,
1112 is_routable);
1113 case ETH_P_IPV6:
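		/* without in-kernel IPv6 support, skip the optimizations */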
1114 if (!IS_ENABLED(CONFIG_IPV6))
1115 return -EINVAL;
1116
1117 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
1118 is_unsnoopable,
1119 is_routable);
1120 default:
1121 return -EINVAL;
1122 }
1123}
1124
1125/**
1126 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
1127 * interest
1128 * @bat_priv: the bat priv with all the soft interface information
1129 * @ethhdr: ethernet header of a packet
1130 *
1131 * Return: the number of nodes which want all IPv4 multicast traffic if the
1132 * given ethhdr is from an IPv4 packet, or the number of nodes which want all
1133 * IPv6 multicast traffic if it is from an IPv6 packet.
1134 */
1135static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
1136 struct ethhdr *ethhdr)
1137{
1138 switch (ntohs(ethhdr->h_proto)) {
1139 case ETH_P_IP:
1140 return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
1141 case ETH_P_IPV6:
1142 return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
1143 default:
1144 /* we shouldn't be here... */
1145 return 0;
1146 }
1147}
1148
1149/**
1150 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
1151 * @bat_priv: the bat priv with all the soft interface information
1152 * @protocol: the ethernet protocol type to count multicast routers for
1153 *
1154 * Return: the number of nodes which want all routable IPv4 multicast traffic
1155 * if the protocol is ETH_P_IP or the number of nodes which want all routable
1156 * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
1157 */
1159static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
1160 int protocol)
1161{
1162 switch (protocol) {
1163 case ETH_P_IP:
1164 return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
1165 case ETH_P_IPV6:
1166 return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
1167 default:
1168 return 0;
1169 }
1170}
1171
1172/**
1173 * batadv_mcast_forw_mode_by_count() - get forwarding mode by count
1174 * @bat_priv: the bat priv with all the soft interface information
1175 * @skb: the multicast packet to check
1176 * @vid: the vlan identifier
1177 * @is_routable: stores whether the destination is routable
1178 * @count: the number of originators the multicast packet needs to be sent to
1179 *
1180 * For a multicast packet with multiple destination originators, checks which
1181 * mode to use. For BATADV_FORW_MCAST it also encapsulates the packet with a
1182 * complete batman-adv multicast header.
1183 *
1184 * Return:
1185 * BATADV_FORW_MCAST: If all nodes have multicast packet routing
1186 * capabilities and an MTU >= 1280 on all hard interfaces (including us)
1187 * and the encapsulated multicast packet with all destination addresses
1188 *  would still fit into a 1280 byte batman-adv multicast packet
1189 * (excluding the outer ethernet frame) and we could successfully push
1190 * the full batman-adv multicast packet header.
1191 * BATADV_FORW_UCASTS: If the packet cannot be sent in a batman-adv
1192 * multicast packet and the amount of batman-adv unicast packets needed
1193 * is smaller or equal to the configured multicast fanout.
1194 * BATADV_FORW_BCAST: Otherwise.
1195 */
1196static enum batadv_forw_mode
1197batadv_mcast_forw_mode_by_count(struct batadv_priv *bat_priv,
1198 struct sk_buff *skb, unsigned short vid,
1199 int is_routable, int count)
1200{
1201 unsigned int mcast_hdrlen = batadv_mcast_forw_packet_hdrlen(count);
1202 u8 own_tvlv_flags = bat_priv->mcast.mla_flags.tvlv_flags;
1203
1204 if (!atomic_read(&bat_priv->mcast.num_no_mc_ptype_capa) &&
1205 own_tvlv_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA &&
1206 skb->len + mcast_hdrlen <= IPV6_MIN_MTU &&
1207 batadv_mcast_forw_push(bat_priv, skb, vid, is_routable, count))
1208 return BATADV_FORW_MCAST;
1209
1210 if (count <= atomic_read(&bat_priv->multicast_fanout))
1211 return BATADV_FORW_UCASTS;
1212
1213 return BATADV_FORW_BCAST;
1214}
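
/* Worked example for the decision above (illustrative only; the fanout is
 * an assumed configuration value, not a statement about the default):
 *
 *   count = 3, multicast_fanout = 16, num_no_mc_ptype_capa == 0, we
 *   announce BATADV_MCAST_HAVE_MC_PTYPE_CAPA ourselves, skb->len plus
 *   the multicast header fits into IPV6_MIN_MTU (1280 bytes) and
 *   batadv_mcast_forw_push() succeeds:
 *     -> BATADV_FORW_MCAST, one packet carrying all three destinations
 *
 *   count = 3, but at least one node lacks the multicast packet type
 *   capability:
 *     -> 3 <= 16, BATADV_FORW_UCASTS, three unicast copies
 *
 *   count = 40, again without full multicast packet type support:
 *     -> 40 > 16, BATADV_FORW_BCAST, classic flooding
 */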
1215
1216/**
1217 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
1218 * @bat_priv: the bat priv with all the soft interface information
1219 * @skb: the multicast packet to check
1220 * @vid: the vlan identifier
1221 * @is_routable: stores whether the destination is routable
1222 *
1223 * Return: The forwarding mode as enum batadv_forw_mode.
1224 */
1225enum batadv_forw_mode
1226batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
1227 unsigned short vid, int *is_routable)
1228{
1229 int ret, tt_count, ip_count, unsnoop_count, total_count;
1230 bool is_unsnoopable = false;
1231 struct ethhdr *ethhdr;
1232 int rtr_count = 0;
1233
1234 ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
1235 is_routable);
1236 if (ret == -ENOMEM)
1237 return BATADV_FORW_NONE;
1238 else if (ret < 0)
1239 return BATADV_FORW_BCAST;
1240
1241 ethhdr = eth_hdr(skb);
1242
1243 tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
1244 BATADV_NO_FLAGS);
1245 ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
1246 unsnoop_count = !is_unsnoopable ? 0 :
1247 atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
1248 rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
1249
1250 total_count = tt_count + ip_count + unsnoop_count + rtr_count;
1251
1252 if (!total_count)
1253 return BATADV_FORW_NONE;
1254 else if (unsnoop_count)
1255 return BATADV_FORW_BCAST;
1256
1257 return batadv_mcast_forw_mode_by_count(bat_priv, skb, vid, *is_routable,
1258 total_count);
1259}
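
/* Minimal sketch of how a transmit path could act on the returned mode
 * (hypothetical caller for illustration only; bat_priv, skb and vid are
 * assumed locals, the real dispatch lives in the soft interface code):
 *
 *	int is_routable = 0;
 *	enum batadv_forw_mode mode;
 *
 *	mode = batadv_mcast_forw_mode(bat_priv, skb, vid, &is_routable);
 *	if (mode == BATADV_FORW_NONE)
 *		kfree_skb(skb);		<- nobody is interested
 *	else if (mode == BATADV_FORW_UCASTS)
 *		batadv_mcast_forw_send(bat_priv, skb, vid, is_routable);
 *	else if (mode == BATADV_FORW_MCAST)
 *		...			<- skb was already encapsulated by
 *					   batadv_mcast_forw_mode_by_count()
 *	else
 *		...			<- classic flooding
 */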
1260
1261/**
1262 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
1263 * @bat_priv: the bat priv with all the soft interface information
1264 * @skb: the multicast packet to send
1265 * @vid: the vlan identifier
1266 * @orig_node: the originator to send the packet to
1267 *
1268 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
1269 */
1270static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
1271 struct sk_buff *skb,
1272 unsigned short vid,
1273 struct batadv_orig_node *orig_node)
1274{
1275 /* Avoid sending multicast-in-unicast packets to other BLA
1276 * gateways - they already got the frame from the LAN side
1277 * we share with them.
1278 * TODO: Refactor to take BLA into account earlier, to avoid
1279 * reducing the mcast_fanout count.
1280 */
1281 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
1282 dev_kfree_skb(skb);
1283 return NET_XMIT_SUCCESS;
1284 }
1285
1286 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
1287 orig_node, vid);
1288}
1289
1290/**
1291 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1292 * @bat_priv: the bat priv with all the soft interface information
1293 * @skb: the multicast packet to transmit
1294 * @vid: the vlan identifier
1295 *
1296 * Sends copies of a frame with multicast destination to any multicast
1297 * listener registered in the translation table. A transmission is performed
1298 * via a batman-adv unicast packet for each such destination node.
1299 *
1300 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1301 * otherwise.
1302 */
1303static int
1304batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1305 unsigned short vid)
1306{
1307	int ret = NET_XMIT_SUCCESS;
1308	struct sk_buff *newskb;
1310	struct batadv_tt_orig_list_entry *orig_entry;
1312	struct batadv_tt_global_entry *tt_global;
1313	const u8 *addr = eth_hdr(skb)->h_dest;
1314
1315 tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1316 if (!tt_global)
1317 goto out;
1318
1319 rcu_read_lock();
1320 hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1321 newskb = skb_copy(skb, GFP_ATOMIC);
1322 if (!newskb) {
1323 ret = NET_XMIT_DROP;
1324 break;
1325 }
1326
1327 batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
1328 orig_entry->orig_node);
1329 }
1330 rcu_read_unlock();
1331
1332 batadv_tt_global_entry_put(tt_global);
1333
1334out:
1335 return ret;
1336}
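
/* Each listener gets its own skb_copy() so that the batman-adv unicast
 * encapsulation added per destination cannot touch buffer data shared
 * with the other copies. If a copy fails mid-loop, NET_XMIT_DROP is
 * reported, but copies already handed to batadv_mcast_forw_send_orig()
 * remain sent; there is no rollback.
 */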
1337
1338/**
1339 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1340 * @bat_priv: the bat priv with all the soft interface information
1341 * @skb: the multicast packet to transmit
1342 * @vid: the vlan identifier
1343 *
1344 * Sends copies of a frame with multicast destination to any node with a
1345 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1346 * batman-adv unicast packet for each such destination node.
1347 *
1348 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1349 * otherwise.
1350 */
1351static int
1352batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1353 struct sk_buff *skb, unsigned short vid)
1354{
1355 struct batadv_orig_node *orig_node;
1356 int ret = NET_XMIT_SUCCESS;
1357 struct sk_buff *newskb;
1358
1359 rcu_read_lock();
1360 hlist_for_each_entry_rcu(orig_node,
1361 &bat_priv->mcast.want_all_ipv4_list,
1362 mcast_want_all_ipv4_node) {
1363 newskb = skb_copy(skb, GFP_ATOMIC);
1364 if (!newskb) {
1365 ret = NET_XMIT_DROP;
1366 break;
1367 }
1368
1369 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1370 }
1371 rcu_read_unlock();
1372 return ret;
1373}
1374
1375/**
1376 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1377 * @bat_priv: the bat priv with all the soft interface information
1378 * @skb: the multicast packet to transmit
1379 * @vid: the vlan identifier
1380 *
1381 * Sends copies of a frame with multicast destination to any node with a
1382 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1383 * batman-adv unicast packet for each such destination node.
1384 *
1385 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1386 * otherwise.
1387 */
1388static int
1389batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1390 struct sk_buff *skb, unsigned short vid)
1391{
1392 struct batadv_orig_node *orig_node;
1393 int ret = NET_XMIT_SUCCESS;
1394 struct sk_buff *newskb;
1395
1396 rcu_read_lock();
1397 hlist_for_each_entry_rcu(orig_node,
1398 &bat_priv->mcast.want_all_ipv6_list,
1399 mcast_want_all_ipv6_node) {
1400 newskb = skb_copy(skb, GFP_ATOMIC);
1401 if (!newskb) {
1402 ret = NET_XMIT_DROP;
1403 break;
1404 }
1405
1406 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1407 }
1408 rcu_read_unlock();
1409 return ret;
1410}
1411
1412/**
1413 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1414 * @bat_priv: the bat priv with all the soft interface information
1415 * @skb: the multicast packet to transmit
1416 * @vid: the vlan identifier
1417 *
1418 * Sends copies of a frame with multicast destination to any node with a
1419 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1420 * transmission is performed via a batman-adv unicast packet for each such
1421 * destination node.
1422 *
1423 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1424 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1425 */
1426static int
1427batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1428 struct sk_buff *skb, unsigned short vid)
1429{
1430 switch (ntohs(eth_hdr(skb)->h_proto)) {
1431 case ETH_P_IP:
1432 return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1433 case ETH_P_IPV6:
1434 return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1435 default:
1436 /* we shouldn't be here... */
1437 return NET_XMIT_DROP;
1438 }
1439}
1440
1441/**
1442 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
1443 * @bat_priv: the bat priv with all the soft interface information
1444 * @skb: the multicast packet to transmit
1445 * @vid: the vlan identifier
1446 *
1447 * Sends copies of a frame with multicast destination to any node with a
1448 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
1449 * batman-adv unicast packet for each such destination node.
1450 *
1451 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1452 * otherwise.
1453 */
1454static int
1455batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
1456 struct sk_buff *skb, unsigned short vid)
1457{
1458 struct batadv_orig_node *orig_node;
1459 int ret = NET_XMIT_SUCCESS;
1460 struct sk_buff *newskb;
1461
1462 rcu_read_lock();
1463 hlist_for_each_entry_rcu(orig_node,
1464 &bat_priv->mcast.want_all_rtr4_list,
1465 mcast_want_all_rtr4_node) {
1466 newskb = skb_copy(skb, GFP_ATOMIC);
1467 if (!newskb) {
1468 ret = NET_XMIT_DROP;
1469 break;
1470 }
1471
1472 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1473 }
1474 rcu_read_unlock();
1475 return ret;
1476}
1477
1478/**
1479 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
1480 * @bat_priv: the bat priv with all the soft interface information
1481 * @skb: the multicast packet to transmit
1482 * @vid: the vlan identifier
1483 *
1484 * Sends copies of a frame with multicast destination to any node with a
1485 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
1486 * batman-adv unicast packet for each such destination node.
1487 *
1488 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1489 * otherwise.
1490 */
1491static int
1492batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
1493 struct sk_buff *skb, unsigned short vid)
1494{
1495 struct batadv_orig_node *orig_node;
1496 int ret = NET_XMIT_SUCCESS;
1497 struct sk_buff *newskb;
1498
1499 rcu_read_lock();
1500 hlist_for_each_entry_rcu(orig_node,
1501 &bat_priv->mcast.want_all_rtr6_list,
1502 mcast_want_all_rtr6_node) {
1503 newskb = skb_copy(skb, GFP_ATOMIC);
1504 if (!newskb) {
1505 ret = NET_XMIT_DROP;
1506 break;
1507 }
1508
1509 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1510 }
1511 rcu_read_unlock();
1512 return ret;
1513}
1514
1515/**
1516 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
1517 * @bat_priv: the bat priv with all the soft interface information
1518 * @skb: the multicast packet to transmit
1519 * @vid: the vlan identifier
1520 *
1521 * Sends copies of a frame with multicast destination to any node with a
1522 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
1523 * transmission is performed via a batman-adv unicast packet for each such
1524 * destination node.
1525 *
1526 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1527 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1528 */
1529static int
1530batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
1531 struct sk_buff *skb, unsigned short vid)
1532{
1533 switch (ntohs(eth_hdr(skb)->h_proto)) {
1534 case ETH_P_IP:
1535 return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
1536 case ETH_P_IPV6:
1537 return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
1538 default:
1539 /* we shouldn't be here... */
1540 return NET_XMIT_DROP;
1541 }
1542}
1543
1544/**
1545 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
1546 * @bat_priv: the bat priv with all the soft interface information
1547 * @skb: the multicast packet to transmit
1548 * @vid: the vlan identifier
1549 * @is_routable: stores whether the destination is routable
1550 *
1551 * Sends copies of a frame with multicast destination to any node that signaled
1552 * interest in it, that is either via the translation table or the corresponding
1553 * want-all flags. A transmission is performed via a batman-adv unicast packet
1554 * for each such destination node.
1555 *
1556 * The given skb is consumed/freed.
1557 *
1558 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1559 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1560 */
1561int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1562 unsigned short vid, int is_routable)
1563{
1564 int ret;
1565
1566 ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1567 if (ret != NET_XMIT_SUCCESS) {
1568 kfree_skb(skb);
1569 return ret;
1570 }
1571
1572 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1573 if (ret != NET_XMIT_SUCCESS) {
1574 kfree_skb(skb);
1575 return ret;
1576 }
1577
1578 if (!is_routable)
1579 goto skip_mc_router;
1580
1581 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
1582 if (ret != NET_XMIT_SUCCESS) {
1583 kfree_skb(skb);
1584 return ret;
1585 }
1586
1587skip_mc_router:
1588 consume_skb(skb);
1589 return ret;
1590}
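
/* Summary of the transmission order used above: translation table
 * listeners first, then the want-all-ipv4/ipv6 nodes and, only for
 * routable destinations, the want-all-rtr nodes. Ownership of skb is
 * taken over in every case: it is freed on error and consumed once all
 * copies have been handed to the lower layers.
 */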
1591
1592/**
1593 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1594 * @bat_priv: the bat priv with all the soft interface information
1595 * @orig: the orig_node whose multicast state might have changed
1596 * @mcast_flags: flags indicating the new multicast state
1597 *
1598 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1599 * orig, has toggled then this method updates the counter and the list
1600 * accordingly.
1601 *
1602 * Caller needs to hold orig->mcast_handler_lock.
1603 */
1604static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1605 struct batadv_orig_node *orig,
1606 u8 mcast_flags)
1607{
1608 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1609 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1610
1611 lockdep_assert_held(&orig->mcast_handler_lock);
1612
1613 /* switched from flag unset to set */
1614 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1615 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1616 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1617
1618 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1619 /* flag checks above + mcast_handler_lock prevents this */
1620 WARN_ON(!hlist_unhashed(node));
1621
1622 hlist_add_head_rcu(node, head);
1623 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1624 /* switched from flag set to unset */
1625 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1626 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1627 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1628
1629 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1630 /* flag checks above + mcast_handler_lock prevents this */
1631 WARN_ON(hlist_unhashed(node));
1632
1633 hlist_del_init_rcu(node);
1634 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1635 }
1636}
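
/* The same pattern is shared by all batadv_mcast_want_*_update() helpers
 * below: a flag transition, detected under orig->mcast_handler_lock,
 * adjusts an atomic counter (for cheap checks in the forwarding path) and
 * adds the originator to or removes it from the matching RCU list, which
 * the batadv_mcast_forw_want_*() senders above iterate lock-free.
 */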
1637
1638/**
1639 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1640 * @bat_priv: the bat priv with all the soft interface information
1641 * @orig: the orig_node whose multicast state might have changed
1642 * @mcast_flags: flags indicating the new multicast state
1643 *
1644 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1645 * toggled then this method updates the counter and the list accordingly.
1646 *
1647 * Caller needs to hold orig->mcast_handler_lock.
1648 */
1649static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1650 struct batadv_orig_node *orig,
1651 u8 mcast_flags)
1652{
1653 struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1654 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1655
1656 lockdep_assert_held(&orig->mcast_handler_lock);
1657
1658 /* switched from flag unset to set */
1659 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1660 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1661 atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1662
1663 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1664 /* flag checks above + mcast_handler_lock prevents this */
1665 WARN_ON(!hlist_unhashed(node));
1666
1667 hlist_add_head_rcu(node, head);
1668 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1669 /* switched from flag set to unset */
1670 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1671 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1672 atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1673
1674 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1675 /* flag checks above + mcast_handler_lock prevents this */
1676 WARN_ON(hlist_unhashed(node));
1677
1678 hlist_del_init_rcu(node);
1679 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1680 }
1681}
1682
1683/**
1684 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1685 * @bat_priv: the bat priv with all the soft interface information
1686 * @orig: the orig_node whose multicast state might have changed
1687 * @mcast_flags: flags indicating the new multicast state
1688 *
1689 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1690 * toggled then this method updates the counter and the list accordingly.
1691 *
1692 * Caller needs to hold orig->mcast_handler_lock.
1693 */
1694static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1695 struct batadv_orig_node *orig,
1696 u8 mcast_flags)
1697{
1698 struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1699 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1700
1701 lockdep_assert_held(&orig->mcast_handler_lock);
1702
1703 /* switched from flag unset to set */
1704 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1705 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1706 atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1707
1708 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1709 /* flag checks above + mcast_handler_lock prevents this */
1710 WARN_ON(!hlist_unhashed(node));
1711
1712 hlist_add_head_rcu(node, head);
1713 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1714 /* switched from flag set to unset */
1715 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1716 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1717 atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1718
1719 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1720 /* flag checks above + mcast_handler_lock prevents this */
1721 WARN_ON(hlist_unhashed(node));
1722
1723 hlist_del_init_rcu(node);
1724 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1725 }
1726}
1727
1728/**
1729 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
1730 * @bat_priv: the bat priv with all the soft interface information
1731 * @orig: the orig_node whose multicast state might have changed
1732 * @mcast_flags: flags indicating the new multicast state
1733 *
1734 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
1735 * toggled then this method updates the counter and the list accordingly.
1736 *
1737 * Caller needs to hold orig->mcast_handler_lock.
1738 */
1739static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
1740 struct batadv_orig_node *orig,
1741 u8 mcast_flags)
1742{
1743 struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
1744 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
1745
1746 lockdep_assert_held(&orig->mcast_handler_lock);
1747
1748 /* switched from flag set to unset */
1749 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
1750 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
1751 atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
1752
1753 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1754 /* flag checks above + mcast_handler_lock prevents this */
1755 WARN_ON(!hlist_unhashed(node));
1756
1757 hlist_add_head_rcu(node, head);
1758 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1759 /* switched from flag unset to set */
1760 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
1761 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
1762 atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
1763
1764 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1765 /* flag checks above + mcast_handler_lock prevents this */
1766 WARN_ON(hlist_unhashed(node));
1767
1768 hlist_del_init_rcu(node);
1769 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1770 }
1771}
1772
1773/**
1774 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
1775 * @bat_priv: the bat priv with all the soft interface information
1776 * @orig: the orig_node whose multicast state might have changed
1777 * @mcast_flags: flags indicating the new multicast state
1778 *
1779 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
1780 * toggled then this method updates the counter and the list accordingly.
1781 *
1782 * Caller needs to hold orig->mcast_handler_lock.
1783 */
1784static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
1785 struct batadv_orig_node *orig,
1786 u8 mcast_flags)
1787{
1788 struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
1789 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
1790
1791 lockdep_assert_held(&orig->mcast_handler_lock);
1792
1793 /* switched from flag set to unset */
1794 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
1795 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
1796 atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
1797
1798 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1799 /* flag checks above + mcast_handler_lock prevents this */
1800 WARN_ON(!hlist_unhashed(node));
1801
1802 hlist_add_head_rcu(node, head);
1803 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1804 /* switched from flag unset to set */
1805 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
1806 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
1807 atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
1808
1809 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1810 /* flag checks above + mcast_handler_lock prevents this */
1811 WARN_ON(hlist_unhashed(node));
1812
1813 hlist_del_init_rcu(node);
1814 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1815 }
1816}
1817
1818/**
1819 * batadv_mcast_have_mc_ptype_update() - update multicast packet type counter
1820 * @bat_priv: the bat priv with all the soft interface information
1821 * @orig: the orig_node whose multicast state might have changed
1822 * @mcast_flags: flags indicating the new multicast state
1823 *
1824 * If the BATADV_MCAST_HAVE_MC_PTYPE_CAPA flag of this originator, orig, has
1825 * toggled then this method updates the counter accordingly.
1826 */
1827static void batadv_mcast_have_mc_ptype_update(struct batadv_priv *bat_priv,
1828 struct batadv_orig_node *orig,
1829 u8 mcast_flags)
1830{
1831 lockdep_assert_held(&orig->mcast_handler_lock);
1832
1833 /* switched from flag set to unset */
1834 if (!(mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) &&
1835 orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA)
1836 atomic_inc(&bat_priv->mcast.num_no_mc_ptype_capa);
1837 /* switched from flag unset to set */
1838 else if (mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA &&
1839 !(orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA))
1840 atomic_dec(&bat_priv->mcast.num_no_mc_ptype_capa);
1841}
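
/* Unlike the want_* counters above, this one is inverted: it counts the
 * originators which do NOT announce BATADV_MCAST_HAVE_MC_PTYPE_CAPA.
 * batadv_mcast_forw_mode_by_count() can then use a simple
 * "counter == 0" test to know that every node in the mesh understands
 * the batman-adv multicast packet type.
 */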
1842
1843/**
1844 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
1845 * @enabled: whether the originator has multicast TVLV support enabled
1846 * @tvlv_value: tvlv buffer containing the multicast flags
1847 * @tvlv_value_len: tvlv buffer length
1848 *
1849 * Return: multicast flags for the given tvlv buffer
1850 */
1851static u8
1852batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
1853{
1854 u8 mcast_flags = BATADV_NO_FLAGS;
1855
1856 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
1857 mcast_flags = *(u8 *)tvlv_value;
1858
1859 if (!enabled) {
1860 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1861 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1862 }
1863
1864 /* remove redundant flags to avoid sending duplicate packets later */
1865 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
1866 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
1867
1868 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
1869 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
1870
1871 return mcast_flags;
1872}
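
/* Worked example: for an originator without multicast TVLV support
 * (enabled == false) the flags computed above become
 *
 *   BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6 |
 *   BATADV_MCAST_WANT_NO_RTR4  | BATADV_MCAST_WANT_NO_RTR6
 *
 * so the node receives everything via the want-all lists and is not
 * redundantly added to the rtr4/rtr6 lists as well.
 */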
1873
1874/**
1875 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
1876 * @bat_priv: the bat priv with all the soft interface information
1877 * @orig: the orig_node of the ogm
1878 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
1879 * @tvlv_value: tvlv buffer containing the multicast data
1880 * @tvlv_value_len: tvlv buffer length
1881 */
1882static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
1883 struct batadv_orig_node *orig,
1884 u8 flags,
1885 void *tvlv_value,
1886 u16 tvlv_value_len)
1887{
1888 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1889 u8 mcast_flags;
1890
1891 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
1892 tvlv_value, tvlv_value_len);
1893
1894 spin_lock_bh(&orig->mcast_handler_lock);
1895
1896 if (orig_mcast_enabled &&
1897 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1898 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1899 } else if (!orig_mcast_enabled &&
1900 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1901 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1902 }
1903
1904 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
1905
1906 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
1907 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
1908 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
1909 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
1910 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
1911 batadv_mcast_have_mc_ptype_update(bat_priv, orig, mcast_flags);
1912
1913 orig->mcast_flags = mcast_flags;
1914 spin_unlock_bh(&orig->mcast_handler_lock);
1915}
1916
1917/**
1918 * batadv_mcast_init() - initialize the multicast optimizations structures
1919 * @bat_priv: the bat priv with all the soft interface information
1920 */
1921void batadv_mcast_init(struct batadv_priv *bat_priv)
1922{
1923 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
1924 NULL, NULL, BATADV_TVLV_MCAST, 2,
1925 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1926 batadv_tvlv_handler_register(bat_priv, NULL, NULL,
1927 batadv_mcast_forw_tracker_tvlv_handler,
1928 BATADV_TVLV_MCAST_TRACKER, 1,
1929 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1930
1931 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
1932 batadv_mcast_start_timer(bat_priv);
1933}
1934
1935/**
1936 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
1937 * @msg: buffer for the message
1938 * @bat_priv: the bat priv with all the soft interface information
1939 *
1940 * Return: 0 or error code.
1941 */
1942int batadv_mcast_mesh_info_put(struct sk_buff *msg,
1943 struct batadv_priv *bat_priv)
1944{
1945 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
1946 u32 flags_priv = BATADV_NO_FLAGS;
1947
1948 if (bat_priv->mcast.mla_flags.bridged) {
1949 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
1950
1951 if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
1952 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
1953 if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
1954 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
1955 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
1956 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
1957 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
1958 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
1959 }
1960
1961 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
1962 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
1963 return -EMSGSIZE;
1964
1965 return 0;
1966}
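
/* Example: on a bridged soft interface with only an IPv4 querier present,
 * flags_priv above ends up as
 *   BATADV_MCAST_FLAGS_BRIDGED | BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS
 * whereas an unbridged setup always reports BATADV_NO_FLAGS here.
 */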
1967
1968/**
1969 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
1970 * to a netlink socket
1971 * @msg: buffer for the message
1972 * @portid: netlink port
1973 * @cb: Control block containing additional options
1974 * @orig_node: originator to dump the multicast flags of
1975 *
1976 * Return: 0 or error code.
1977 */
1978static int
1979batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
1980 struct netlink_callback *cb,
1981 struct batadv_orig_node *orig_node)
1982{
1983 void *hdr;
1984
1985 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
1986 &batadv_netlink_family, NLM_F_MULTI,
1987 BATADV_CMD_GET_MCAST_FLAGS);
1988 if (!hdr)
1989 return -ENOBUFS;
1990
1991 genl_dump_check_consistent(cb, hdr);
1992
1993 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
1994 orig_node->orig)) {
1995 genlmsg_cancel(msg, hdr);
1996 return -EMSGSIZE;
1997 }
1998
1999 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2000 &orig_node->capabilities)) {
2001 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
2002 orig_node->mcast_flags)) {
2003 genlmsg_cancel(msg, hdr);
2004 return -EMSGSIZE;
2005 }
2006 }
2007
2008 genlmsg_end(msg, hdr);
2009 return 0;
2010}
2011
2012/**
2013 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
2014 * table to a netlink socket
2015 * @msg: buffer for the message
2016 * @portid: netlink port
2017 * @cb: Control block containing additional options
2018 * @hash: hash to dump
2019 * @bucket: bucket index to dump
2020 * @idx_skip: How many entries to skip
2021 *
2022 * Return: 0 or error code.
2023 */
2024static int
2025batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
2026 struct netlink_callback *cb,
2027 struct batadv_hashtable *hash,
2028 unsigned int bucket, long *idx_skip)
2029{
2030 struct batadv_orig_node *orig_node;
2031 long idx = 0;
2032
2033 spin_lock_bh(&hash->list_locks[bucket]);
2034 cb->seq = atomic_read(&hash->generation) << 1 | 1;
2035
2036 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
2037 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2038 &orig_node->capa_initialized))
2039 continue;
2040
2041 if (idx < *idx_skip)
2042 goto skip;
2043
2044 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
2045 spin_unlock_bh(&hash->list_locks[bucket]);
2046 *idx_skip = idx;
2047
2048 return -EMSGSIZE;
2049 }
2050
2051skip:
2052 idx++;
2053 }
2054 spin_unlock_bh(&hash->list_locks[bucket]);
2055
2056 return 0;
2057}
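
/* The sequence number derived from hash->generation above allows
 * genl_dump_check_consistent() to set NLM_F_DUMP_INTR on the reply if the
 * originator hash changed between dump passes; shifting and setting the
 * lowest bit presumably just keeps the value non-zero so it can always be
 * distinguished from "no sequence number yet".
 */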
2058
2059/**
2060 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2061 * @msg: buffer for the message
2062 * @portid: netlink port
2063 * @cb: Control block containing additional options
2064 * @bat_priv: the bat priv with all the soft interface information
2065 * @bucket: current bucket to dump
2066 * @idx: index in current bucket to the next entry to dump
2067 *
2068 * Return: 0 or error code.
2069 */
2070static int
2071__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2072 struct netlink_callback *cb,
2073 struct batadv_priv *bat_priv, long *bucket, long *idx)
2074{
2075 struct batadv_hashtable *hash = bat_priv->orig_hash;
2076 long bucket_tmp = *bucket;
2077 long idx_tmp = *idx;
2078
2079 while (bucket_tmp < hash->size) {
2080 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2081 bucket_tmp, &idx_tmp))
2082 break;
2083
2084 bucket_tmp++;
2085 idx_tmp = 0;
2086 }
2087
2088 *bucket = bucket_tmp;
2089 *idx = idx_tmp;
2090
2091 return msg->len;
2092}
2093
2094/**
2095 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
2096 * callback
2097 * @cb: netlink callback structure
2098 * @primary_if: the primary interface pointer to return the result in
2099 *
2100 * Return: 0 or error code.
2101 */
2102static int
2103batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
2104 struct batadv_hard_iface **primary_if)
2105{
2106 struct batadv_hard_iface *hard_iface = NULL;
2107 struct net *net = sock_net(cb->skb->sk);
2108 struct net_device *soft_iface;
2109 struct batadv_priv *bat_priv;
2110 int ifindex;
2111 int ret = 0;
2112
2113 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
2114 if (!ifindex)
2115 return -EINVAL;
2116
2117 soft_iface = dev_get_by_index(net, ifindex);
2118 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2119 ret = -ENODEV;
2120 goto out;
2121 }
2122
2123 bat_priv = netdev_priv(soft_iface);
2124
2125 hard_iface = batadv_primary_if_get_selected(bat_priv);
2126 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
2127 ret = -ENOENT;
2128 goto out;
2129 }
2130
2131out:
2132 dev_put(soft_iface);
2133
2134 if (!ret && primary_if)
2135 *primary_if = hard_iface;
2136 else
2137 batadv_hardif_put(hard_iface);
2138
2139 return ret;
2140}
2141
2142/**
2143 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2144 * @msg: buffer for the message
2145 * @cb: callback structure containing arguments
2146 *
2147 * Return: message length.
2148 */
2149int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
2150{
2151 struct batadv_hard_iface *primary_if = NULL;
2152 int portid = NETLINK_CB(cb->skb).portid;
2153 struct batadv_priv *bat_priv;
2154 long *bucket = &cb->args[0];
2155 long *idx = &cb->args[1];
2156 int ret;
2157
2158 ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
2159 if (ret)
2160 return ret;
2161
2162 bat_priv = netdev_priv(primary_if->soft_iface);
2163 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
2164
2165 batadv_hardif_put(primary_if);
2166 return ret;
2167}
2168
2169/**
2170 * batadv_mcast_free() - free the multicast optimizations structures
2171 * @bat_priv: the bat priv with all the soft interface information
2172 */
2173void batadv_mcast_free(struct batadv_priv *bat_priv)
2174{
2175 cancel_delayed_work_sync(&bat_priv->mcast.work);
2176
2177 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2178 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST_TRACKER, 1);
2179 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2180
2181 /* safely calling outside of worker, as worker was canceled above */
2182 batadv_mcast_mla_tt_retract(bat_priv, NULL);
2183}
2184
2185/**
2186 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
2187 * @orig: the originator which is going to get purged
2188 */
2189void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2190{
2191 struct batadv_priv *bat_priv = orig->bat_priv;
2192
2193 spin_lock_bh(&orig->mcast_handler_lock);
2194
2195 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2196 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2197 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2198 batadv_mcast_want_rtr4_update(bat_priv, orig,
2199 BATADV_MCAST_WANT_NO_RTR4);
2200 batadv_mcast_want_rtr6_update(bat_priv, orig,
2201 BATADV_MCAST_WANT_NO_RTR6);
2202 batadv_mcast_have_mc_ptype_update(bat_priv, orig,
2203 BATADV_MCAST_HAVE_MC_PTYPE_CAPA);
2204
2205 spin_unlock_bh(&orig->mcast_handler_lock);
2206}
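
/* The flag values passed above are the "neutral" states for each helper:
 * BATADV_NO_FLAGS clears the unsnoopables/want-all-ipv4/ipv6 state, while
 * BATADV_MCAST_WANT_NO_RTR4/6 and BATADV_MCAST_HAVE_MC_PTYPE_CAPA are the
 * states that keep an originator off the rtr lists and out of the
 * no-mc-ptype counter. This undoes any counter or list contribution the
 * purged originator previously made.
 */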