v5.14.15
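The listing below is the bridge multicast explicit host tracking (EHT) implementation as shipped in v5.14.15 (net/bridge/br_multicast_eht.c). Each port group keeps two rb-trees: eht_host_tree, keyed by the reporting host address, and eht_set_tree, keyed by source address, with every set holding its own entry_tree of per-host entries. Entries are created from IGMPv3/MLDv2 report records, aged out by per-entry and per-set timers, and the whole mechanism only runs on ports with multicast fast-leave enabled (see the BR_MULTICAST_FAST_LEAVE check in br_multicast_eht_handle()).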
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
  3#include <linux/err.h>
  4#include <linux/export.h>
  5#include <linux/if_ether.h>
  6#include <linux/igmp.h>
  7#include <linux/in.h>
  8#include <linux/jhash.h>
  9#include <linux/kernel.h>
 10#include <linux/log2.h>
 11#include <linux/netdevice.h>
 12#include <linux/netfilter_bridge.h>
 13#include <linux/random.h>
 14#include <linux/rculist.h>
 15#include <linux/skbuff.h>
 16#include <linux/slab.h>
 17#include <linux/timer.h>
 18#include <linux/inetdevice.h>
 19#include <linux/mroute.h>
 20#include <net/ip.h>
 21#include <net/switchdev.h>
 22#if IS_ENABLED(CONFIG_IPV6)
 23#include <linux/icmpv6.h>
 24#include <net/ipv6.h>
 25#include <net/mld.h>
 26#include <net/ip6_checksum.h>
 27#include <net/addrconf.h>
 28#endif
 29
 30#include "br_private.h"
 31#include "br_private_mcast_eht.h"
 32
 33static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
 34					   union net_bridge_eht_addr *src_addr,
 35					   union net_bridge_eht_addr *h_addr);
 36static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
 37					      union net_bridge_eht_addr *src_addr,
 38					      union net_bridge_eht_addr *h_addr,
 39					      int filter_mode,
 40					      bool allow_zero_src);
 41
 42static struct net_bridge_group_eht_host *
 43br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
 44			     union net_bridge_eht_addr *h_addr)
 45{
 46	struct rb_node *node = pg->eht_host_tree.rb_node;
 47
 48	while (node) {
 49		struct net_bridge_group_eht_host *this;
 50		int result;
 51
 52		this = rb_entry(node, struct net_bridge_group_eht_host,
 53				rb_node);
 54		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
 55		if (result < 0)
 56			node = node->rb_left;
 57		else if (result > 0)
 58			node = node->rb_right;
 59		else
 60			return this;
 61	}
 62
 63	return NULL;
 64}
 65
 66static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
 67					     union net_bridge_eht_addr *h_addr)
 68{
 69	struct net_bridge_group_eht_host *eht_host;
 70
 71	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
 72	if (!eht_host)
 73		return MCAST_INCLUDE;
 74
 75	return eht_host->filter_mode;
 76}
 77
 78static struct net_bridge_group_eht_set_entry *
 79br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
 80				  union net_bridge_eht_addr *h_addr)
 81{
 82	struct rb_node *node = eht_set->entry_tree.rb_node;
 83
 84	while (node) {
 85		struct net_bridge_group_eht_set_entry *this;
 86		int result;
 87
 88		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
 89				rb_node);
 90		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
 91		if (result < 0)
 92			node = node->rb_left;
 93		else if (result > 0)
 94			node = node->rb_right;
 95		else
 96			return this;
 97	}
 98
 99	return NULL;
100}
101
102static struct net_bridge_group_eht_set *
103br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
104			    union net_bridge_eht_addr *src_addr)
105{
106	struct rb_node *node = pg->eht_set_tree.rb_node;
107
108	while (node) {
109		struct net_bridge_group_eht_set *this;
110		int result;
111
112		this = rb_entry(node, struct net_bridge_group_eht_set,
113				rb_node);
114		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
115		if (result < 0)
116			node = node->rb_left;
117		else if (result > 0)
118			node = node->rb_right;
119		else
120			return this;
121	}
122
123	return NULL;
124}
125
126static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
127{
128	WARN_ON(!hlist_empty(&eht_host->set_entries));
129
130	br_multicast_eht_hosts_dec(eht_host->pg);
131
132	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
133	RB_CLEAR_NODE(&eht_host->rb_node);
134	kfree(eht_host);
135}
136
137static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
138{
139	struct net_bridge_group_eht_set_entry *set_h;
140
141	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
142	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
143
144	del_timer_sync(&set_h->timer);
145	kfree(set_h);
146}
147
148static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
149{
150	struct net_bridge_group_eht_set *eht_set;
151
152	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
153	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
154	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
155
156	del_timer_sync(&eht_set->timer);
157	kfree(eht_set);
158}
159
160static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
161{
162	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
163	union net_bridge_eht_addr zero_addr;
164
165	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
166	RB_CLEAR_NODE(&set_h->rb_node);
167	hlist_del_init(&set_h->host_list);
168	memset(&zero_addr, 0, sizeof(zero_addr));
169	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
170		eht_host->num_entries--;
171	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
172	queue_work(system_long_wq, &set_h->br->mcast_gc_work);
173
174	if (hlist_empty(&eht_host->set_entries))
175		__eht_destroy_host(eht_host);
176}
177
178static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
179{
180	struct net_bridge_group_eht_set_entry *set_h;
181	struct rb_node *node;
182
183	while ((node = rb_first(&eht_set->entry_tree))) {
184		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
185				 rb_node);
186		__eht_del_set_entry(set_h);
187	}
188
189	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
190	RB_CLEAR_NODE(&eht_set->rb_node);
191	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
192	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
193}
194
195void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
196{
197	struct net_bridge_group_eht_set *eht_set;
198	struct rb_node *node;
199
200	while ((node = rb_first(&pg->eht_set_tree))) {
201		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
202				   rb_node);
203		br_multicast_del_eht_set(eht_set);
204	}
205}
206
207static void br_multicast_eht_set_entry_expired(struct timer_list *t)
208{
209	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
210	struct net_bridge *br = set_h->br;
211
212	spin_lock(&br->multicast_lock);
213	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
214		goto out;
215
216	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
217				       &set_h->eht_set->src_addr,
218				       &set_h->h_addr);
219out:
220	spin_unlock(&br->multicast_lock);
221}
222
223static void br_multicast_eht_set_expired(struct timer_list *t)
224{
225	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
226							      timer);
227	struct net_bridge *br = eht_set->br;
228
229	spin_lock(&br->multicast_lock);
230	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
231		goto out;
232
233	br_multicast_del_eht_set(eht_set);
234out:
235	spin_unlock(&br->multicast_lock);
236}
237
238static struct net_bridge_group_eht_host *
239__eht_lookup_create_host(struct net_bridge_port_group *pg,
240			 union net_bridge_eht_addr *h_addr,
241			 unsigned char filter_mode)
242{
243	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
244	struct net_bridge_group_eht_host *eht_host;
245
246	while (*link) {
247		struct net_bridge_group_eht_host *this;
248		int result;
249
250		this = rb_entry(*link, struct net_bridge_group_eht_host,
251				rb_node);
252		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
253		parent = *link;
254		if (result < 0)
255			link = &((*link)->rb_left);
256		else if (result > 0)
257			link = &((*link)->rb_right);
258		else
259			return this;
260	}
261
262	if (br_multicast_eht_hosts_over_limit(pg))
263		return NULL;
264
265	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
266	if (!eht_host)
267		return NULL;
268
269	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
270	INIT_HLIST_HEAD(&eht_host->set_entries);
271	eht_host->pg = pg;
272	eht_host->filter_mode = filter_mode;
273
274	rb_link_node(&eht_host->rb_node, parent, link);
275	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);
276
277	br_multicast_eht_hosts_inc(pg);
278
279	return eht_host;
280}
281
282static struct net_bridge_group_eht_set_entry *
283__eht_lookup_create_set_entry(struct net_bridge *br,
284			      struct net_bridge_group_eht_set *eht_set,
285			      struct net_bridge_group_eht_host *eht_host,
286			      bool allow_zero_src)
287{
288	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
289	struct net_bridge_group_eht_set_entry *set_h;
290
291	while (*link) {
292		struct net_bridge_group_eht_set_entry *this;
293		int result;
294
295		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
296				rb_node);
297		result = memcmp(&eht_host->h_addr, &this->h_addr,
298				sizeof(union net_bridge_eht_addr));
299		parent = *link;
300		if (result < 0)
301			link = &((*link)->rb_left);
302		else if (result > 0)
303			link = &((*link)->rb_right);
304		else
305			return this;
306	}
307
308	/* always allow auto-created zero entry */
309	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
310		return NULL;
311
312	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
313	if (!set_h)
314		return NULL;
315
316	memcpy(&set_h->h_addr, &eht_host->h_addr,
317	       sizeof(union net_bridge_eht_addr));
318	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
319	set_h->eht_set = eht_set;
320	set_h->h_parent = eht_host;
321	set_h->br = br;
322	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);
323
324	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
325	rb_link_node(&set_h->rb_node, parent, link);
326	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
327	/* we must not count the auto-created zero entry otherwise we won't be
328	 * able to track the full list of PG_SRC_ENT_LIMIT entries
329	 */
330	if (!allow_zero_src)
331		eht_host->num_entries++;
332
333	return set_h;
334}
335
336static struct net_bridge_group_eht_set *
337__eht_lookup_create_set(struct net_bridge_port_group *pg,
338			union net_bridge_eht_addr *src_addr)
339{
340	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
341	struct net_bridge_group_eht_set *eht_set;
342
343	while (*link) {
344		struct net_bridge_group_eht_set *this;
345		int result;
346
347		this = rb_entry(*link, struct net_bridge_group_eht_set,
348				rb_node);
349		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
350		parent = *link;
351		if (result < 0)
352			link = &((*link)->rb_left);
353		else if (result > 0)
354			link = &((*link)->rb_right);
355		else
356			return this;
357	}
358
359	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
360	if (!eht_set)
361		return NULL;
362
363	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
364	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
365	eht_set->pg = pg;
366	eht_set->br = pg->key.port->br;
367	eht_set->entry_tree = RB_ROOT;
368	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);
369
370	rb_link_node(&eht_set->rb_node, parent, link);
371	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);
372
373	return eht_set;
374}
375
376static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
377					    union net_bridge_eht_addr *dest)
378{
379	switch (src->proto) {
380	case htons(ETH_P_IP):
381		dest->ip4 = src->src.ip4;
382		break;
383#if IS_ENABLED(CONFIG_IPV6)
384	case htons(ETH_P_IPV6):
385		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
386		break;
387#endif
388	}
389}
390
391static void br_eht_convert_host_filter_mode(struct net_bridge_port_group *pg,
392					    union net_bridge_eht_addr *h_addr,
393					    int filter_mode)
394{
395	struct net_bridge_group_eht_host *eht_host;
396	union net_bridge_eht_addr zero_addr;
397
398	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
399	if (eht_host)
400		eht_host->filter_mode = filter_mode;
401
402	memset(&zero_addr, 0, sizeof(zero_addr));
403	switch (filter_mode) {
404	case MCAST_INCLUDE:
405		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
406		break;
407	case MCAST_EXCLUDE:
408		br_multicast_create_eht_set_entry(pg, &zero_addr, h_addr,
409						  MCAST_EXCLUDE,
410						  true);
411		break;
412	}
413}
414
415static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
416					      union net_bridge_eht_addr *src_addr,
417					      union net_bridge_eht_addr *h_addr,
418					      int filter_mode,
419					      bool allow_zero_src)
420{
421	struct net_bridge_group_eht_set_entry *set_h;
422	struct net_bridge_group_eht_host *eht_host;
423	struct net_bridge *br = pg->key.port->br;
424	struct net_bridge_group_eht_set *eht_set;
425	union net_bridge_eht_addr zero_addr;
426
427	memset(&zero_addr, 0, sizeof(zero_addr));
428	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
429		return;
430
431	eht_set = __eht_lookup_create_set(pg, src_addr);
432	if (!eht_set)
433		return;
434
435	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
436	if (!eht_host)
437		goto fail_host;
438
439	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
440					      allow_zero_src);
441	if (!set_h)
442		goto fail_set_entry;
443
444	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(br));
445	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(br));
446
447	return;
448
449fail_set_entry:
450	if (hlist_empty(&eht_host->set_entries))
451		__eht_destroy_host(eht_host);
452fail_host:
453	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
454		br_multicast_del_eht_set(eht_set);
455}
456
457static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
458					   union net_bridge_eht_addr *src_addr,
459					   union net_bridge_eht_addr *h_addr)
460{
461	struct net_bridge_group_eht_set_entry *set_h;
462	struct net_bridge_group_eht_set *eht_set;
463	bool set_deleted = false;
464
465	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
466	if (!eht_set)
467		goto out;
468
469	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
470	if (!set_h)
471		goto out;
472
473	__eht_del_set_entry(set_h);
474
475	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
476		br_multicast_del_eht_set(eht_set);
477		set_deleted = true;
478	}
479
480out:
481	return set_deleted;
482}
483
484static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
485				      union net_bridge_eht_addr *h_addr)
486{
487	struct net_bridge_group_eht_set_entry *set_h;
488	struct net_bridge_group_eht_host *eht_host;
489	struct hlist_node *tmp;
490
491	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
492	if (!eht_host)
493		return;
494
495	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
496		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
497					       &set_h->eht_set->src_addr,
498					       &set_h->h_addr);
499}
500
501/* create new set entries from reports */
502static void __eht_create_set_entries(struct net_bridge_port_group *pg,
503				     union net_bridge_eht_addr *h_addr,
504				     void *srcs,
505				     u32 nsrcs,
506				     size_t addr_size,
507				     int filter_mode)
508{
509	union net_bridge_eht_addr eht_src_addr;
510	u32 src_idx;
511
512	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
513	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
514		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
515		br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
516						  filter_mode,
517						  false);
518	}
519}
520
521/* delete existing set entries and their (S,G) entries if they were the last */
522static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
523				  union net_bridge_eht_addr *h_addr,
524				  void *srcs,
525				  u32 nsrcs,
526				  size_t addr_size)
527{
528	union net_bridge_eht_addr eht_src_addr;
529	struct net_bridge_group_src *src_ent;
530	bool changed = false;
531	struct br_ip src_ip;
532	u32 src_idx;
533
534	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
535	memset(&src_ip, 0, sizeof(src_ip));
536	src_ip.proto = pg->key.addr.proto;
537	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
538		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
539		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
540			continue;
541		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
542		src_ent = br_multicast_find_group_src(pg, &src_ip);
543		if (!src_ent)
544			continue;
545		br_multicast_del_group_src(src_ent, true);
546		changed = true;
547	}
548
549	return changed;
550}
551
552static bool br_multicast_eht_allow(struct net_bridge_port_group *pg,
553				   union net_bridge_eht_addr *h_addr,
554				   void *srcs,
555				   u32 nsrcs,
556				   size_t addr_size)
557{
558	bool changed = false;
559
560	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
561	case MCAST_INCLUDE:
562		__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
563					 MCAST_INCLUDE);
564		break;
565	case MCAST_EXCLUDE:
566		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
567						addr_size);
568		break;
569	}
570
571	return changed;
572}
573
574static bool br_multicast_eht_block(struct net_bridge_port_group *pg,
575				   union net_bridge_eht_addr *h_addr,
576				   void *srcs,
577				   u32 nsrcs,
578				   size_t addr_size)
579{
580	bool changed = false;
581
582	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
583	case MCAST_INCLUDE:
584		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
585						addr_size);
586		break;
587	case MCAST_EXCLUDE:
588		__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
589					 MCAST_EXCLUDE);
590		break;
591	}
592
593	return changed;
594}
595
596/* flush_entries is true when changing mode */
597static bool __eht_inc_exc(struct net_bridge_port_group *pg,
598			  union net_bridge_eht_addr *h_addr,
599			  void *srcs,
600			  u32 nsrcs,
601			  size_t addr_size,
602			  unsigned char filter_mode,
603			  bool to_report)
604{
605	bool changed = false, flush_entries = to_report;
606	union net_bridge_eht_addr eht_src_addr;
607
608	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
609		flush_entries = true;
610
611	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
612	/* if we're changing mode del host and its entries */
613	if (flush_entries)
614		br_multicast_del_eht_host(pg, h_addr);
615	__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
616				 filter_mode);
617	/* we can be missing sets only if we've deleted some entries */
618	if (flush_entries) {
619		struct net_bridge *br = pg->key.port->br;
620		struct net_bridge_group_eht_set *eht_set;
621		struct net_bridge_group_src *src_ent;
622		struct hlist_node *tmp;
623
624		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
625			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
626							&eht_src_addr);
627			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
628				br_multicast_del_group_src(src_ent, true);
629				changed = true;
630				continue;
631			}
632			/* this is an optimization for TO_INCLUDE where we lower
633			 * the set's timeout to LMQT to catch timeout hosts:
634			 * - host A (timing out): set entries X, Y
635			 * - host B: set entry Z (new from current TO_INCLUDE)
636			 *           sends BLOCK Z after LMQT but host A's EHT
637			 *           entries still exist (unless lowered to LMQT
638			 *           so they can timeout with the S,Gs)
639			 * => we wait another LMQT, when we can just delete the
640			 *    group immediately
641			 */
642			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
643			    filter_mode != MCAST_INCLUDE ||
644			    !to_report)
645				continue;
646			eht_set = br_multicast_eht_set_lookup(pg,
647							      &eht_src_addr);
648			if (!eht_set)
649				continue;
650			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(br));
651		}
652	}
653
654	return changed;
655}
656
657static bool br_multicast_eht_inc(struct net_bridge_port_group *pg,
658				 union net_bridge_eht_addr *h_addr,
659				 void *srcs,
660				 u32 nsrcs,
661				 size_t addr_size,
662				 bool to_report)
663{
664	bool changed;
665
666	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
667				MCAST_INCLUDE, to_report);
668	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_INCLUDE);
669
670	return changed;
671}
672
673static bool br_multicast_eht_exc(struct net_bridge_port_group *pg,
674				 union net_bridge_eht_addr *h_addr,
675				 void *srcs,
676				 u32 nsrcs,
677				 size_t addr_size,
678				 bool to_report)
679{
680	bool changed;
681
682	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
683				MCAST_EXCLUDE, to_report);
684	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_EXCLUDE);
685
686	return changed;
687}
688
689static bool __eht_ip4_handle(struct net_bridge_port_group *pg,
690			     union net_bridge_eht_addr *h_addr,
691			     void *srcs,
692			     u32 nsrcs,
693			     int grec_type)
694{
695	bool changed = false, to_report = false;
696
697	switch (grec_type) {
698	case IGMPV3_ALLOW_NEW_SOURCES:
699		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs, sizeof(__be32));
700		break;
701	case IGMPV3_BLOCK_OLD_SOURCES:
702		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
703						 sizeof(__be32));
704		break;
705	case IGMPV3_CHANGE_TO_INCLUDE:
706		to_report = true;
707		fallthrough;
708	case IGMPV3_MODE_IS_INCLUDE:
709		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
710					       sizeof(__be32), to_report);
711		break;
712	case IGMPV3_CHANGE_TO_EXCLUDE:
713		to_report = true;
714		fallthrough;
715	case IGMPV3_MODE_IS_EXCLUDE:
716		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
717					       sizeof(__be32), to_report);
718		break;
719	}
720
721	return changed;
722}
723
724#if IS_ENABLED(CONFIG_IPV6)
725static bool __eht_ip6_handle(struct net_bridge_port_group *pg,
726			     union net_bridge_eht_addr *h_addr,
727			     void *srcs,
728			     u32 nsrcs,
729			     int grec_type)
730{
731	bool changed = false, to_report = false;
732
733	switch (grec_type) {
734	case MLD2_ALLOW_NEW_SOURCES:
735		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs,
736				       sizeof(struct in6_addr));
737		break;
738	case MLD2_BLOCK_OLD_SOURCES:
739		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
740						 sizeof(struct in6_addr));
741		break;
742	case MLD2_CHANGE_TO_INCLUDE:
743		to_report = true;
744		fallthrough;
745	case MLD2_MODE_IS_INCLUDE:
746		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
747					       sizeof(struct in6_addr),
748					       to_report);
749		break;
750	case MLD2_CHANGE_TO_EXCLUDE:
751		to_report = true;
752		fallthrough;
753	case MLD2_MODE_IS_EXCLUDE:
754		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
755					       sizeof(struct in6_addr),
756					       to_report);
757		break;
758	}
759
760	return changed;
761}
762#endif
763
764/* true means an entry was deleted */
765bool br_multicast_eht_handle(struct net_bridge_port_group *pg,
766			     void *h_addr,
767			     void *srcs,
768			     u32 nsrcs,
769			     size_t addr_size,
770			     int grec_type)
771{
772	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
773	union net_bridge_eht_addr eht_host_addr;
774	bool changed = false;
775
776	if (!eht_enabled)
777		goto out;
778
779	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
780	memcpy(&eht_host_addr, h_addr, addr_size);
781	if (addr_size == sizeof(__be32))
782		changed = __eht_ip4_handle(pg, &eht_host_addr, srcs, nsrcs,
783					   grec_type);
784#if IS_ENABLED(CONFIG_IPV6)
785	else
786		changed = __eht_ip6_handle(pg, &eht_host_addr, srcs, nsrcs,
787					   grec_type);
788#endif
789
790out:
791	return changed;
792}
793
794int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
795				     u32 eht_hosts_limit)
796{
797	struct net_bridge *br = p->br;
798
799	if (!eht_hosts_limit)
800		return -EINVAL;
801
802	spin_lock_bh(&br->multicast_lock);
803	p->multicast_eht_hosts_limit = eht_hosts_limit;
804	spin_unlock_bh(&br->multicast_lock);
805
806	return 0;
807}
v6.13.7
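The same file as of v6.13.7. The logic is unchanged; the visible differences are that the creation and handling paths now take a const struct net_bridge_mcast *brmctx multicast context, so the group membership and last-member query intervals come from br_multicast_gmi(brmctx) and br_multicast_lmqt(brmctx) rather than being derived from pg->key.port->br, and the GC destroy callbacks call timer_shutdown_sync() instead of del_timer_sync().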
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
  3#include <linux/err.h>
  4#include <linux/export.h>
  5#include <linux/if_ether.h>
  6#include <linux/igmp.h>
  7#include <linux/in.h>
  8#include <linux/jhash.h>
  9#include <linux/kernel.h>
 10#include <linux/log2.h>
 11#include <linux/netdevice.h>
 12#include <linux/netfilter_bridge.h>
 13#include <linux/random.h>
 14#include <linux/rculist.h>
 15#include <linux/skbuff.h>
 16#include <linux/slab.h>
 17#include <linux/timer.h>
 18#include <linux/inetdevice.h>
 19#include <linux/mroute.h>
 20#include <net/ip.h>
 21#include <net/switchdev.h>
 22#if IS_ENABLED(CONFIG_IPV6)
 23#include <linux/icmpv6.h>
 24#include <net/ipv6.h>
 25#include <net/mld.h>
 26#include <net/ip6_checksum.h>
 27#include <net/addrconf.h>
 28#endif
 29
 30#include "br_private.h"
 31#include "br_private_mcast_eht.h"
 32
 33static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
 34					   union net_bridge_eht_addr *src_addr,
 35					   union net_bridge_eht_addr *h_addr);
 36static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
 37					      struct net_bridge_port_group *pg,
 38					      union net_bridge_eht_addr *src_addr,
 39					      union net_bridge_eht_addr *h_addr,
 40					      int filter_mode,
 41					      bool allow_zero_src);
 42
 43static struct net_bridge_group_eht_host *
 44br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
 45			     union net_bridge_eht_addr *h_addr)
 46{
 47	struct rb_node *node = pg->eht_host_tree.rb_node;
 48
 49	while (node) {
 50		struct net_bridge_group_eht_host *this;
 51		int result;
 52
 53		this = rb_entry(node, struct net_bridge_group_eht_host,
 54				rb_node);
 55		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
 56		if (result < 0)
 57			node = node->rb_left;
 58		else if (result > 0)
 59			node = node->rb_right;
 60		else
 61			return this;
 62	}
 63
 64	return NULL;
 65}
 66
 67static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
 68					     union net_bridge_eht_addr *h_addr)
 69{
 70	struct net_bridge_group_eht_host *eht_host;
 71
 72	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
 73	if (!eht_host)
 74		return MCAST_INCLUDE;
 75
 76	return eht_host->filter_mode;
 77}
 78
 79static struct net_bridge_group_eht_set_entry *
 80br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
 81				  union net_bridge_eht_addr *h_addr)
 82{
 83	struct rb_node *node = eht_set->entry_tree.rb_node;
 84
 85	while (node) {
 86		struct net_bridge_group_eht_set_entry *this;
 87		int result;
 88
 89		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
 90				rb_node);
 91		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
 92		if (result < 0)
 93			node = node->rb_left;
 94		else if (result > 0)
 95			node = node->rb_right;
 96		else
 97			return this;
 98	}
 99
100	return NULL;
101}
102
103static struct net_bridge_group_eht_set *
104br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
105			    union net_bridge_eht_addr *src_addr)
106{
107	struct rb_node *node = pg->eht_set_tree.rb_node;
108
109	while (node) {
110		struct net_bridge_group_eht_set *this;
111		int result;
112
113		this = rb_entry(node, struct net_bridge_group_eht_set,
114				rb_node);
115		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
116		if (result < 0)
117			node = node->rb_left;
118		else if (result > 0)
119			node = node->rb_right;
120		else
121			return this;
122	}
123
124	return NULL;
125}
126
127static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
128{
129	WARN_ON(!hlist_empty(&eht_host->set_entries));
130
131	br_multicast_eht_hosts_dec(eht_host->pg);
132
133	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
134	RB_CLEAR_NODE(&eht_host->rb_node);
135	kfree(eht_host);
136}
137
138static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
139{
140	struct net_bridge_group_eht_set_entry *set_h;
141
142	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
143	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
144
145	timer_shutdown_sync(&set_h->timer);
146	kfree(set_h);
147}
148
149static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
150{
151	struct net_bridge_group_eht_set *eht_set;
152
153	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
154	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
155	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
156
157	timer_shutdown_sync(&eht_set->timer);
158	kfree(eht_set);
159}
160
161static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
162{
163	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
164	union net_bridge_eht_addr zero_addr;
165
166	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
167	RB_CLEAR_NODE(&set_h->rb_node);
168	hlist_del_init(&set_h->host_list);
169	memset(&zero_addr, 0, sizeof(zero_addr));
170	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
171		eht_host->num_entries--;
172	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
173	queue_work(system_long_wq, &set_h->br->mcast_gc_work);
174
175	if (hlist_empty(&eht_host->set_entries))
176		__eht_destroy_host(eht_host);
177}
178
179static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
180{
181	struct net_bridge_group_eht_set_entry *set_h;
182	struct rb_node *node;
183
184	while ((node = rb_first(&eht_set->entry_tree))) {
185		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
186				 rb_node);
187		__eht_del_set_entry(set_h);
188	}
189
190	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
191	RB_CLEAR_NODE(&eht_set->rb_node);
192	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
193	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
194}
195
196void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
197{
198	struct net_bridge_group_eht_set *eht_set;
199	struct rb_node *node;
200
201	while ((node = rb_first(&pg->eht_set_tree))) {
202		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
203				   rb_node);
204		br_multicast_del_eht_set(eht_set);
205	}
206}
207
208static void br_multicast_eht_set_entry_expired(struct timer_list *t)
209{
210	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
211	struct net_bridge *br = set_h->br;
212
213	spin_lock(&br->multicast_lock);
214	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
215		goto out;
216
217	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
218				       &set_h->eht_set->src_addr,
219				       &set_h->h_addr);
220out:
221	spin_unlock(&br->multicast_lock);
222}
223
224static void br_multicast_eht_set_expired(struct timer_list *t)
225{
226	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
227							      timer);
228	struct net_bridge *br = eht_set->br;
229
230	spin_lock(&br->multicast_lock);
231	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
232		goto out;
233
234	br_multicast_del_eht_set(eht_set);
235out:
236	spin_unlock(&br->multicast_lock);
237}
238
239static struct net_bridge_group_eht_host *
240__eht_lookup_create_host(struct net_bridge_port_group *pg,
241			 union net_bridge_eht_addr *h_addr,
242			 unsigned char filter_mode)
243{
244	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
245	struct net_bridge_group_eht_host *eht_host;
246
247	while (*link) {
248		struct net_bridge_group_eht_host *this;
249		int result;
250
251		this = rb_entry(*link, struct net_bridge_group_eht_host,
252				rb_node);
253		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
254		parent = *link;
255		if (result < 0)
256			link = &((*link)->rb_left);
257		else if (result > 0)
258			link = &((*link)->rb_right);
259		else
260			return this;
261	}
262
263	if (br_multicast_eht_hosts_over_limit(pg))
264		return NULL;
265
266	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
267	if (!eht_host)
268		return NULL;
269
270	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
271	INIT_HLIST_HEAD(&eht_host->set_entries);
272	eht_host->pg = pg;
273	eht_host->filter_mode = filter_mode;
274
275	rb_link_node(&eht_host->rb_node, parent, link);
276	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);
277
278	br_multicast_eht_hosts_inc(pg);
279
280	return eht_host;
281}
282
283static struct net_bridge_group_eht_set_entry *
284__eht_lookup_create_set_entry(struct net_bridge *br,
285			      struct net_bridge_group_eht_set *eht_set,
286			      struct net_bridge_group_eht_host *eht_host,
287			      bool allow_zero_src)
288{
289	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
290	struct net_bridge_group_eht_set_entry *set_h;
291
292	while (*link) {
293		struct net_bridge_group_eht_set_entry *this;
294		int result;
295
296		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
297				rb_node);
298		result = memcmp(&eht_host->h_addr, &this->h_addr,
299				sizeof(union net_bridge_eht_addr));
300		parent = *link;
301		if (result < 0)
302			link = &((*link)->rb_left);
303		else if (result > 0)
304			link = &((*link)->rb_right);
305		else
306			return this;
307	}
308
309	/* always allow auto-created zero entry */
310	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
311		return NULL;
312
313	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
314	if (!set_h)
315		return NULL;
316
317	memcpy(&set_h->h_addr, &eht_host->h_addr,
318	       sizeof(union net_bridge_eht_addr));
319	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
320	set_h->eht_set = eht_set;
321	set_h->h_parent = eht_host;
322	set_h->br = br;
323	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);
324
325	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
326	rb_link_node(&set_h->rb_node, parent, link);
327	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
328	/* we must not count the auto-created zero entry otherwise we won't be
329	 * able to track the full list of PG_SRC_ENT_LIMIT entries
330	 */
331	if (!allow_zero_src)
332		eht_host->num_entries++;
333
334	return set_h;
335}
336
337static struct net_bridge_group_eht_set *
338__eht_lookup_create_set(struct net_bridge_port_group *pg,
339			union net_bridge_eht_addr *src_addr)
340{
341	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
342	struct net_bridge_group_eht_set *eht_set;
343
344	while (*link) {
345		struct net_bridge_group_eht_set *this;
346		int result;
347
348		this = rb_entry(*link, struct net_bridge_group_eht_set,
349				rb_node);
350		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
351		parent = *link;
352		if (result < 0)
353			link = &((*link)->rb_left);
354		else if (result > 0)
355			link = &((*link)->rb_right);
356		else
357			return this;
358	}
359
360	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
361	if (!eht_set)
362		return NULL;
363
364	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
365	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
366	eht_set->pg = pg;
367	eht_set->br = pg->key.port->br;
368	eht_set->entry_tree = RB_ROOT;
369	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);
370
371	rb_link_node(&eht_set->rb_node, parent, link);
372	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);
373
374	return eht_set;
375}
376
377static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
378					    union net_bridge_eht_addr *dest)
379{
380	switch (src->proto) {
381	case htons(ETH_P_IP):
382		dest->ip4 = src->src.ip4;
383		break;
384#if IS_ENABLED(CONFIG_IPV6)
385	case htons(ETH_P_IPV6):
386		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
387		break;
388#endif
389	}
390}
391
392static void br_eht_convert_host_filter_mode(const struct net_bridge_mcast *brmctx,
393					    struct net_bridge_port_group *pg,
394					    union net_bridge_eht_addr *h_addr,
395					    int filter_mode)
396{
397	struct net_bridge_group_eht_host *eht_host;
398	union net_bridge_eht_addr zero_addr;
399
400	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
401	if (eht_host)
402		eht_host->filter_mode = filter_mode;
403
404	memset(&zero_addr, 0, sizeof(zero_addr));
405	switch (filter_mode) {
406	case MCAST_INCLUDE:
407		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
408		break;
409	case MCAST_EXCLUDE:
410		br_multicast_create_eht_set_entry(brmctx, pg, &zero_addr,
411						  h_addr, MCAST_EXCLUDE,
412						  true);
413		break;
414	}
415}
416
417static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
418					      struct net_bridge_port_group *pg,
419					      union net_bridge_eht_addr *src_addr,
420					      union net_bridge_eht_addr *h_addr,
421					      int filter_mode,
422					      bool allow_zero_src)
423{
424	struct net_bridge_group_eht_set_entry *set_h;
425	struct net_bridge_group_eht_host *eht_host;
426	struct net_bridge *br = pg->key.port->br;
427	struct net_bridge_group_eht_set *eht_set;
428	union net_bridge_eht_addr zero_addr;
429
430	memset(&zero_addr, 0, sizeof(zero_addr));
431	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
432		return;
433
434	eht_set = __eht_lookup_create_set(pg, src_addr);
435	if (!eht_set)
436		return;
437
438	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
439	if (!eht_host)
440		goto fail_host;
441
442	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
443					      allow_zero_src);
444	if (!set_h)
445		goto fail_set_entry;
446
447	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(brmctx));
448	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(brmctx));
449
450	return;
451
452fail_set_entry:
453	if (hlist_empty(&eht_host->set_entries))
454		__eht_destroy_host(eht_host);
455fail_host:
456	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
457		br_multicast_del_eht_set(eht_set);
458}
459
460static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
461					   union net_bridge_eht_addr *src_addr,
462					   union net_bridge_eht_addr *h_addr)
463{
464	struct net_bridge_group_eht_set_entry *set_h;
465	struct net_bridge_group_eht_set *eht_set;
466	bool set_deleted = false;
467
468	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
469	if (!eht_set)
470		goto out;
471
472	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
473	if (!set_h)
474		goto out;
475
476	__eht_del_set_entry(set_h);
477
478	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
479		br_multicast_del_eht_set(eht_set);
480		set_deleted = true;
481	}
482
483out:
484	return set_deleted;
485}
486
487static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
488				      union net_bridge_eht_addr *h_addr)
489{
490	struct net_bridge_group_eht_set_entry *set_h;
491	struct net_bridge_group_eht_host *eht_host;
492	struct hlist_node *tmp;
493
494	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
495	if (!eht_host)
496		return;
497
498	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
499		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
500					       &set_h->eht_set->src_addr,
501					       &set_h->h_addr);
502}
503
504/* create new set entries from reports */
505static void __eht_create_set_entries(const struct net_bridge_mcast *brmctx,
506				     struct net_bridge_port_group *pg,
507				     union net_bridge_eht_addr *h_addr,
508				     void *srcs,
509				     u32 nsrcs,
510				     size_t addr_size,
511				     int filter_mode)
512{
513	union net_bridge_eht_addr eht_src_addr;
514	u32 src_idx;
515
516	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
517	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
518		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
519		br_multicast_create_eht_set_entry(brmctx, pg, &eht_src_addr,
520						  h_addr, filter_mode,
521						  false);
522	}
523}
524
525/* delete existing set entries and their (S,G) entries if they were the last */
526static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
527				  union net_bridge_eht_addr *h_addr,
528				  void *srcs,
529				  u32 nsrcs,
530				  size_t addr_size)
531{
532	union net_bridge_eht_addr eht_src_addr;
533	struct net_bridge_group_src *src_ent;
534	bool changed = false;
535	struct br_ip src_ip;
536	u32 src_idx;
537
538	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
539	memset(&src_ip, 0, sizeof(src_ip));
540	src_ip.proto = pg->key.addr.proto;
541	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
542		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
543		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
544			continue;
545		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
546		src_ent = br_multicast_find_group_src(pg, &src_ip);
547		if (!src_ent)
548			continue;
549		br_multicast_del_group_src(src_ent, true);
550		changed = true;
551	}
552
553	return changed;
554}
555
556static bool br_multicast_eht_allow(const struct net_bridge_mcast *brmctx,
557				   struct net_bridge_port_group *pg,
558				   union net_bridge_eht_addr *h_addr,
559				   void *srcs,
560				   u32 nsrcs,
561				   size_t addr_size)
562{
563	bool changed = false;
564
565	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
566	case MCAST_INCLUDE:
567		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs,
568					 addr_size, MCAST_INCLUDE);
569		break;
570	case MCAST_EXCLUDE:
571		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
572						addr_size);
573		break;
574	}
575
576	return changed;
577}
578
579static bool br_multicast_eht_block(const struct net_bridge_mcast *brmctx,
580				   struct net_bridge_port_group *pg,
581				   union net_bridge_eht_addr *h_addr,
582				   void *srcs,
583				   u32 nsrcs,
584				   size_t addr_size)
585{
586	bool changed = false;
587
588	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
589	case MCAST_INCLUDE:
590		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
591						addr_size);
592		break;
593	case MCAST_EXCLUDE:
594		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
595					 MCAST_EXCLUDE);
596		break;
597	}
598
599	return changed;
600}
601
602/* flush_entries is true when changing mode */
603static bool __eht_inc_exc(const struct net_bridge_mcast *brmctx,
604			  struct net_bridge_port_group *pg,
605			  union net_bridge_eht_addr *h_addr,
606			  void *srcs,
607			  u32 nsrcs,
608			  size_t addr_size,
609			  unsigned char filter_mode,
610			  bool to_report)
611{
612	bool changed = false, flush_entries = to_report;
613	union net_bridge_eht_addr eht_src_addr;
614
615	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
616		flush_entries = true;
617
618	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
619	/* if we're changing mode del host and its entries */
620	if (flush_entries)
621		br_multicast_del_eht_host(pg, h_addr);
622	__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
623				 filter_mode);
624	/* we can be missing sets only if we've deleted some entries */
625	if (flush_entries) {
626		struct net_bridge_group_eht_set *eht_set;
627		struct net_bridge_group_src *src_ent;
628		struct hlist_node *tmp;
629
630		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
631			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
632							&eht_src_addr);
633			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
634				br_multicast_del_group_src(src_ent, true);
635				changed = true;
636				continue;
637			}
638			/* this is an optimization for TO_INCLUDE where we lower
639			 * the set's timeout to LMQT to catch timeout hosts:
640			 * - host A (timing out): set entries X, Y
641			 * - host B: set entry Z (new from current TO_INCLUDE)
642			 *           sends BLOCK Z after LMQT but host A's EHT
643			 *           entries still exist (unless lowered to LMQT
644			 *           so they can timeout with the S,Gs)
645			 * => we wait another LMQT, when we can just delete the
646			 *    group immediately
647			 */
648			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
649			    filter_mode != MCAST_INCLUDE ||
650			    !to_report)
651				continue;
652			eht_set = br_multicast_eht_set_lookup(pg,
653							      &eht_src_addr);
654			if (!eht_set)
655				continue;
656			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(brmctx));
657		}
658	}
659
660	return changed;
661}
662
663static bool br_multicast_eht_inc(const struct net_bridge_mcast *brmctx,
664				 struct net_bridge_port_group *pg,
665				 union net_bridge_eht_addr *h_addr,
666				 void *srcs,
667				 u32 nsrcs,
668				 size_t addr_size,
669				 bool to_report)
670{
671	bool changed;
672
673	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
674				MCAST_INCLUDE, to_report);
675	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_INCLUDE);
676
677	return changed;
678}
679
680static bool br_multicast_eht_exc(const struct net_bridge_mcast *brmctx,
681				 struct net_bridge_port_group *pg,
682				 union net_bridge_eht_addr *h_addr,
683				 void *srcs,
684				 u32 nsrcs,
685				 size_t addr_size,
686				 bool to_report)
687{
688	bool changed;
689
690	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
691				MCAST_EXCLUDE, to_report);
692	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_EXCLUDE);
693
694	return changed;
695}
696
697static bool __eht_ip4_handle(const struct net_bridge_mcast *brmctx,
698			     struct net_bridge_port_group *pg,
699			     union net_bridge_eht_addr *h_addr,
700			     void *srcs,
701			     u32 nsrcs,
702			     int grec_type)
703{
704	bool changed = false, to_report = false;
705
706	switch (grec_type) {
707	case IGMPV3_ALLOW_NEW_SOURCES:
708		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
709				       sizeof(__be32));
710		break;
711	case IGMPV3_BLOCK_OLD_SOURCES:
712		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
713						 sizeof(__be32));
714		break;
715	case IGMPV3_CHANGE_TO_INCLUDE:
716		to_report = true;
717		fallthrough;
718	case IGMPV3_MODE_IS_INCLUDE:
719		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
720					       sizeof(__be32), to_report);
721		break;
722	case IGMPV3_CHANGE_TO_EXCLUDE:
723		to_report = true;
724		fallthrough;
725	case IGMPV3_MODE_IS_EXCLUDE:
726		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
727					       sizeof(__be32), to_report);
728		break;
729	}
730
731	return changed;
732}
733
734#if IS_ENABLED(CONFIG_IPV6)
735static bool __eht_ip6_handle(const struct net_bridge_mcast *brmctx,
736			     struct net_bridge_port_group *pg,
737			     union net_bridge_eht_addr *h_addr,
738			     void *srcs,
739			     u32 nsrcs,
740			     int grec_type)
741{
742	bool changed = false, to_report = false;
743
744	switch (grec_type) {
745	case MLD2_ALLOW_NEW_SOURCES:
746		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
747				       sizeof(struct in6_addr));
748		break;
749	case MLD2_BLOCK_OLD_SOURCES:
750		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
751						 sizeof(struct in6_addr));
752		break;
753	case MLD2_CHANGE_TO_INCLUDE:
754		to_report = true;
755		fallthrough;
756	case MLD2_MODE_IS_INCLUDE:
757		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
758					       sizeof(struct in6_addr),
759					       to_report);
760		break;
761	case MLD2_CHANGE_TO_EXCLUDE:
762		to_report = true;
763		fallthrough;
764	case MLD2_MODE_IS_EXCLUDE:
765		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
766					       sizeof(struct in6_addr),
767					       to_report);
768		break;
769	}
770
771	return changed;
772}
773#endif
774
775/* true means an entry was deleted */
776bool br_multicast_eht_handle(const struct net_bridge_mcast *brmctx,
777			     struct net_bridge_port_group *pg,
778			     void *h_addr,
779			     void *srcs,
780			     u32 nsrcs,
781			     size_t addr_size,
782			     int grec_type)
783{
784	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
785	union net_bridge_eht_addr eht_host_addr;
786	bool changed = false;
787
788	if (!eht_enabled)
789		goto out;
790
791	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
792	memcpy(&eht_host_addr, h_addr, addr_size);
793	if (addr_size == sizeof(__be32))
794		changed = __eht_ip4_handle(brmctx, pg, &eht_host_addr, srcs,
795					   nsrcs, grec_type);
796#if IS_ENABLED(CONFIG_IPV6)
797	else
798		changed = __eht_ip6_handle(brmctx, pg, &eht_host_addr, srcs,
799					   nsrcs, grec_type);
800#endif
801
802out:
803	return changed;
804}
805
806int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
807				     u32 eht_hosts_limit)
808{
809	struct net_bridge *br = p->br;
810
811	if (!eht_hosts_limit)
812		return -EINVAL;
813
814	spin_lock_bh(&br->multicast_lock);
815	p->multicast_eht_hosts_limit = eht_hosts_limit;
816	spin_unlock_bh(&br->multicast_lock);
817
818	return 0;
819}