Linux Audio

Check our new training course

Loading...
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Bridge multicast support.
   4 *
   5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
   6 */
   7
   8#include <linux/err.h>
   9#include <linux/export.h>
  10#include <linux/if_ether.h>
  11#include <linux/igmp.h>
  12#include <linux/in.h>
  13#include <linux/jhash.h>
  14#include <linux/kernel.h>
  15#include <linux/log2.h>
  16#include <linux/netdevice.h>
  17#include <linux/netfilter_bridge.h>
  18#include <linux/random.h>
  19#include <linux/rculist.h>
  20#include <linux/skbuff.h>
  21#include <linux/slab.h>
  22#include <linux/timer.h>
  23#include <linux/inetdevice.h>
  24#include <linux/mroute.h>
  25#include <net/ip.h>
  26#include <net/switchdev.h>
  27#if IS_ENABLED(CONFIG_IPV6)
  28#include <linux/icmpv6.h>
  29#include <net/ipv6.h>
  30#include <net/mld.h>
  31#include <net/ip6_checksum.h>
  32#include <net/addrconf.h>
  33#endif
  34
  35#include "br_private.h"
 
  36
/* Keying for the bridge multicast group database (mdb): entries are hashed
 * on the full br_ip key (protocol + group address + VLAN id).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
  43
  44static void br_multicast_start_querier(struct net_bridge *br,
 
 
 
 
 
 
 
  45				       struct bridge_mcast_own_query *query);
  46static void br_multicast_add_router(struct net_bridge *br,
  47				    struct net_bridge_port *port);
  48static void br_ip4_multicast_leave_group(struct net_bridge *br,
  49					 struct net_bridge_port *port,
  50					 __be32 group,
  51					 __u16 vid,
  52					 const unsigned char *src);
 
  53
  54static void __del_port_router(struct net_bridge_port *p);
 
 
 
  55#if IS_ENABLED(CONFIG_IPV6)
  56static void br_ip6_multicast_leave_group(struct net_bridge *br,
  57					 struct net_bridge_port *port,
  58					 const struct in6_addr *group,
  59					 __u16 vid, const unsigned char *src);
  60#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  61
/* Look up a group entry by address; caller must be in an RCU read section */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
  67
/* Look up a group entry by address under br->multicast_lock.  The lock
 * (asserted below) is what keeps the returned entry alive once the short
 * RCU read section around the hash lookup ends.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
  81
  82static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
  83						   __be32 dst, __u16 vid)
  84{
  85	struct br_ip br_dst;
  86
  87	memset(&br_dst, 0, sizeof(br_dst));
  88	br_dst.u.ip4 = dst;
  89	br_dst.proto = htons(ETH_P_IP);
  90	br_dst.vid = vid;
  91
  92	return br_mdb_ip_get(br, &br_dst);
  93}
  94
  95#if IS_ENABLED(CONFIG_IPV6)
  96static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
  97						   const struct in6_addr *dst,
  98						   __u16 vid)
  99{
 100	struct br_ip br_dst;
 101
 102	memset(&br_dst, 0, sizeof(br_dst));
 103	br_dst.u.ip6 = *dst;
 104	br_dst.proto = htons(ETH_P_IPV6);
 105	br_dst.vid = vid;
 106
 107	return br_mdb_ip_get(br, &br_dst);
 108}
 109#endif
 110
/* Find the mdb entry an ingress packet should be forwarded by, keyed on its
 * destination address and VLAN.  Called on the RX fast path under RCU.
 * Returns NULL (caller floods instead) when snooping is disabled, when the
 * packet is itself IGMP/MLD control traffic, or for non-IP protocols.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	/* IGMP/MLD packets were already classified by the snooping code */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
 141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Timer callback: the group's membership interval elapsed with no report.
 * Drop the bridge device's own (host) membership and, if no port group
 * remains either, unlink the mdb entry and free it after a grace period.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* timer_pending() means we raced with a fresh report re-arming it */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	/* ports still reference the entry; leave it in place */
	if (mp->ports)
		goto out;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_rcu(&mp->mdb_node);

	/* lockless readers may still hold the entry; free after grace */
	kfree_rcu(mp, rcu);

out:
	spin_unlock(&br->multicast_lock);
}
 165
/* Unlink and free one port group from its mdb entry.  Caller must hold
 * br->multicast_lock.  When the last port goes away and the host has not
 * joined, the entry's timer is fired immediately to reap the mdb entry.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mp = br_mdb_ip_get(br, &pg->addr);
	if (WARN_ON(!mp))
		return;

	/* walk the RCU-published ports list looking for pg itself */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		kfree_rcu(p, rcu);

		/* last member gone and no host join: expire the entry now */
		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg must be on mp's list; falling through indicates corruption */
	WARN_ON(1);
}
 199
/* Timer callback: a port's group membership timed out without a report */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	/* skip if re-armed, already unlinked, or a static (permanent) entry */
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
 215
/* Allocate and build an IGMP membership query skb.  @group == 0 yields a
 * general query, otherwise a group-specific query.  The IGMP version (2 or
 * 3) comes from br->multicast_igmp_version.  *igmp_type is set to the IGMP
 * packet type for statistics accounting.  Returns NULL on alloc failure.
 * The returned skb has skb->data at the network header (eth header pulled).
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	/* the extra 4 bytes hold the Router Alert IP option */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* 01:00:5e:00:00:01 - mapped MAC for the all-hosts group 224.0.0.1 */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte Router Alert */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	/* source 0.0.0.0 unless configured to use an interface address */
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option immediately after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* max response time: shorter for group-specific queries */
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	/* leave skb->data at the network header for the caller */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
 308
#if IS_ENABLED(CONFIG_IPV6)
/* Allocate and build an MLDv1/v2 query skb addressed to ff02::1 with a
 * Hop-by-Hop Router Alert option.  An unspecified @grp yields a general
 * query.  On saddr selection failure the BROPT_HAS_IPV6_ADDR option is
 * cleared and NULL is returned; it is set again on success.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	/* the extra 8 bytes hold the Hop-by-Hop extension header */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		/* no usable link-local source address: give up querying */
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* shorter max-response time for group-specific queries */
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	/* leave skb->data at the network header for the caller */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
 422
 423static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
 424						struct br_ip *addr,
 425						u8 *igmp_type)
 
 
 
 
 
 426{
 427	switch (addr->proto) {
 
 
 428	case htons(ETH_P_IP):
 429		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
 
 
 
 
 
 430#if IS_ENABLED(CONFIG_IPV6)
 431	case htons(ETH_P_IPV6):
 432		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
 433						    igmp_type);
 
 
 
 
 
 
 
 
 
 
 
 
 434#endif
 435	}
 436	return NULL;
 437}
 438
/* Find or create the mdb entry for @group.  Caller holds br->multicast_lock.
 * When the database is full, snooping is disabled entirely (failing open to
 * flooding) and ERR_PTR(-E2BIG) is returned; other failures return the
 * corresponding ERR_PTR.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	/* GFP_ATOMIC: may be called from softirq context under the lock */
	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
 472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 473struct net_bridge_port_group *br_multicast_new_port_group(
 474			struct net_bridge_port *port,
 475			struct br_ip *group,
 476			struct net_bridge_port_group __rcu *next,
 477			unsigned char flags,
 478			const unsigned char *src)
 
 
 479{
 480	struct net_bridge_port_group *p;
 481
 482	p = kzalloc(sizeof(*p), GFP_ATOMIC);
 483	if (unlikely(!p))
 484		return NULL;
 485
 486	p->addr = *group;
 487	p->port = port;
 488	p->flags = flags;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 489	rcu_assign_pointer(p->next, next);
 490	hlist_add_head(&p->mglist, &port->mglist);
 491	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
 
 
 492
 493	if (src)
 494		memcpy(p->eth_addr, src, ETH_ALEN);
 495	else
 496		eth_broadcast_addr(p->eth_addr);
 497
 498	return p;
 499}
 500
 501static bool br_port_group_equal(struct net_bridge_port_group *p,
 502				struct net_bridge_port *port,
 503				const unsigned char *src)
 504{
 505	if (p->port != port)
 506		return false;
 507
 508	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
 509		return true;
 510
 511	return ether_addr_equal(src, p->eth_addr);
 512}
 513
/* Mark the bridge device itself as a member of @mp and (re)arm the group's
 * membership timer; optionally emit RTM_NEWMDB on the joined transition.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (notify)
			br_mdb_notify(mp->br->dev, NULL, &mp->addr,
				      RTM_NEWMDB, 0);
	}
	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
 524
/* Drop the bridge device's own membership in @mp; optionally emit
 * RTM_DELMDB on the left transition.  No-op if not currently joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (notify)
		br_mdb_notify(mp->br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
}
 534
 535static int br_multicast_add_group(struct net_bridge *br,
 536				  struct net_bridge_port *port,
 537				  struct br_ip *group,
 538				  const unsigned char *src)
 
 
 
 
 539{
 540	struct net_bridge_port_group __rcu **pp;
 541	struct net_bridge_port_group *p;
 542	struct net_bridge_mdb_entry *mp;
 543	unsigned long now = jiffies;
 544	int err;
 545
 546	spin_lock(&br->multicast_lock);
 547	if (!netif_running(br->dev) ||
 548	    (port && port->state == BR_STATE_DISABLED))
 549		goto out;
 550
 551	mp = br_multicast_new_group(br, group);
 552	err = PTR_ERR(mp);
 553	if (IS_ERR(mp))
 554		goto err;
 555
 556	if (!port) {
 557		br_multicast_host_join(mp, true);
 558		goto out;
 559	}
 560
 561	for (pp = &mp->ports;
 562	     (p = mlock_dereference(*pp, br)) != NULL;
 563	     pp = &p->next) {
 564		if (br_port_group_equal(p, port, src))
 565			goto found;
 566		if ((unsigned long)p->port < (unsigned long)port)
 567			break;
 568	}
 569
 570	p = br_multicast_new_port_group(port, group, *pp, 0, src);
 571	if (unlikely(!p))
 572		goto err;
 
 
 
 573	rcu_assign_pointer(*pp, p);
 574	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 
 575
 576found:
 577	mod_timer(&p->timer, now + br->multicast_membership_interval);
 
 
 
 578out:
 579	err = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 580
 581err:
 582	spin_unlock(&br->multicast_lock);
 583	return err;
 584}
 585
 586static int br_ip4_multicast_add_group(struct net_bridge *br,
 587				      struct net_bridge_port *port,
 588				      __be32 group,
 589				      __u16 vid,
 590				      const unsigned char *src)
 
 591{
 592	struct br_ip br_group;
 
 593
 594	if (ipv4_is_local_multicast(group))
 595		return 0;
 596
 597	memset(&br_group, 0, sizeof(br_group));
 598	br_group.u.ip4 = group;
 599	br_group.proto = htons(ETH_P_IP);
 600	br_group.vid = vid;
 
 601
 602	return br_multicast_add_group(br, port, &br_group, src);
 
 603}
 604
 605#if IS_ENABLED(CONFIG_IPV6)
 606static int br_ip6_multicast_add_group(struct net_bridge *br,
 607				      struct net_bridge_port *port,
 608				      const struct in6_addr *group,
 609				      __u16 vid,
 610				      const unsigned char *src)
 
 611{
 612	struct br_ip br_group;
 
 613
 614	if (ipv6_addr_is_ll_all_nodes(group))
 615		return 0;
 616
 617	memset(&br_group, 0, sizeof(br_group));
 618	br_group.u.ip6 = *group;
 619	br_group.proto = htons(ETH_P_IPV6);
 620	br_group.vid = vid;
 
 621
 622	return br_multicast_add_group(br, port, &br_group, src);
 
 623}
 624#endif
 625
/* Timer callback: a port's learned multicast-router status timed out.
 * Permanent and disabled modes never expire; a pending timer means the
 * status was refreshed while we were waiting for the lock.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
			from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
 642
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Propagate the bridge's own multicast-router state to switchdev offload
 * (deferred, so safe to call under the multicast lock).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
 655
/* Timer callback: the bridge device's own learned router status timed out;
 * tell switchdev we are no longer a multicast router.  Permanent/disabled
 * modes and a freshly re-armed timer are left alone.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
 670
/* The foreign ("other") querier we were deferring to went silent: start
 * acting as the querier ourselves, unless the bridge is down or snooping
 * is disabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
 683
/* Timer thunk: IPv4 other-querier timeout -> take over IPv4 querying */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
 690
#if IS_ENABLED(CONFIG_IPV6)
/* Timer thunk: IPv6 other-querier timeout -> take over IPv6 querying */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
 699
/* Record the source address of a query we generated ourselves as the
 * currently-selected querier address for the matching protocol family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
 711
/* Build and emit one query.  With a @port the query is transmitted out of
 * that port through the bridge LOCAL_OUT netfilter hook; without one it is
 * looped back into the local RX path so the bridge device itself hears it
 * (and records itself as the selected querier).
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
 737
 738static void br_multicast_send_query(struct net_bridge *br,
 739				    struct net_bridge_port *port,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 740				    struct bridge_mcast_own_query *own_query)
 741{
 742	struct bridge_mcast_other_query *other_query = NULL;
 
 743	struct br_ip br_group;
 744	unsigned long time;
 745
 746	if (!netif_running(br->dev) ||
 747	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
 748	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
 749		return;
 750
 751	memset(&br_group.u, 0, sizeof(br_group.u));
 752
 753	if (port ? (own_query == &port->ip4_own_query) :
 754		   (own_query == &br->ip4_own_query)) {
 755		other_query = &br->ip4_other_query;
 
 756		br_group.proto = htons(ETH_P_IP);
 757#if IS_ENABLED(CONFIG_IPV6)
 758	} else {
 759		other_query = &br->ip6_other_query;
 
 760		br_group.proto = htons(ETH_P_IPV6);
 761#endif
 762	}
 763
 764	if (!other_query || timer_pending(&other_query->timer))
 765		return;
 766
 767	__br_multicast_send_query(br, port, &br_group);
 
 
 
 
 
 
 
 
 768
 769	time = jiffies;
 770	time += own_query->startup_sent < br->multicast_startup_query_count ?
 771		br->multicast_startup_query_interval :
 772		br->multicast_query_interval;
 773	mod_timer(&own_query->timer, time);
 774}
 775
/* Own-query timer fired for one protocol family on @port: count a startup
 * query (if still in the startup phase) and send the next general query.
 * Ports that cannot forward (disabled/blocking) stay silent.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}
 795
/* Timer thunk: per-port IPv4 own-query interval elapsed */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}
 802
#if IS_ENABLED(CONFIG_IPV6)
/* Timer thunk: per-port IPv6 own-query interval elapsed */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
 811
 812static void br_mc_disabled_update(struct net_device *dev, bool value)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 813{
 814	struct switchdev_attr attr = {
 815		.orig_dev = dev,
 816		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
 817		.flags = SWITCHDEV_F_DEFER,
 818		.u.mc_disabled = !value,
 819	};
 820
 821	switchdev_port_attr_set(dev, &attr);
 822}
 823
/* Initialize per-port multicast state when a port joins the bridge:
 * default router mode, the router and own-query timers, the switchdev
 * snooping state, and the per-cpu statistics.  Returns 0 or -ENOMEM.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
 845
/* Tear down per-port multicast state when a port leaves the bridge.
 * br_multicast_disable_port() ran earlier, so only permanent groups are
 * expected to remain on mglist.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
 860
/* Restart an own-querier: reset the startup counter and fire the query
 * timer immediately.  The try_to_del/del dance only re-arms the timer when
 * it can actually be taken down (it may be running on another CPU).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
 869
/* Enable multicast on one port; caller holds br->multicast_lock.  Restarts
 * both own queriers and re-registers a permanently-configured router port
 * on the router list if it was removed while the port was down.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
 885
 886void br_multicast_enable_port(struct net_bridge_port *port)
 887{
 888	struct net_bridge *br = port->br;
 889
 890	spin_lock(&br->multicast_lock);
 891	__br_multicast_enable_port(port);
 892	spin_unlock(&br->multicast_lock);
 893}
 894
/* Disable multicast on a port: flush every non-permanent group, drop the
 * port from the router list and stop all its multicast timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
 915
/* Parse an IGMPv3 membership report and apply each group record as a plain
 * join or leave.  Returns 0 or -EINVAL on a truncated/malformed packet.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		/* copy the fields out now: the next pull below may
		 * reallocate skb->data and leave grec dangling
		 */
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* skip over the source list (4 bytes per source) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* INCLUDE with no sources is semantically a leave */
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
 979
#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 membership report and apply each group record as a plain
 * join or leave.  Returns 0 or -EINVAL on a truncated/malformed packet.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	int i;
	int len;
	int num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of group records lives in the second 16-bit data word */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* NOTE(review): sizeof(_nsrcs) is the size of a pointer,
		 * not of the 2-byte field — a stricter-than-needed bound.
		 * Harmless here because the full grec_len pull follows,
		 * but worth confirming against upstream.
		 */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(_nsrcs))
			return -EINVAL;

		/* read nsrcs via skb_header_pointer: the data may be
		 * split across fragments, and copying it out keeps the
		 * value safe across the pull below
		 */
		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		/* compute grec only after the pull, which may move skb->data */
		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* INCLUDE with no sources is semantically a leave */
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif
1059
/* IGMP querier election: the numerically lowest source address wins.
 * Returns true if @saddr is accepted as (or becomes) the current
 * querier, recording its address and ingress port; false if a querier
 * with a lower address is still active.
 * Runs under br->multicast_lock (see comment at the update label).
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* no querier activity at all yet -> accept unconditionally */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	/* 0.0.0.0 means no querier address recorded yet */
	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	/* host byte order compare: lower (or equal) address wins */
	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1084
1085#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election: the numerically lowest source address wins.
 * IPv6 counterpart of br_ip4_multicast_select_querier(); note there is
 * no "unset address" short-circuit here — the all-zero initial address
 * already compares lowest.
 * Runs under br->multicast_lock (see comment at the update label).
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	/* no querier activity at all yet -> accept unconditionally */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
1107#endif
1108
1109static bool br_multicast_select_querier(struct net_bridge *br,
1110					struct net_bridge_port *port,
1111					struct br_ip *saddr)
1112{
1113	switch (saddr->proto) {
1114	case htons(ETH_P_IP):
1115		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
 
1116#if IS_ENABLED(CONFIG_IPV6)
1117	case htons(ETH_P_IPV6):
1118		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
 
1119#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1120	}
1121
1122	return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1123}
1124
/* Refresh the "other querier present" timer after a foreign query won
 * the election.  delay_time is only set when the timer was not already
 * running, i.e. when this querier was first seen.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1135
/* Propagate a port's multicast-router state to switchdev drivers.
 * SWITCHDEV_F_DEFER: the attr set is deferred, so this is safe to call
 * from contexts holding br->multicast_lock.
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
1148
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list: nothing to do */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* find the last node whose pointer value is above ours */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	/* notify userspace (netlink) and offloading hardware */
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}
1176
/* Note that a multicast router was detected and (re)arm its timeout.
 * @port == NULL means the bridge device itself acts as the router; the
 * bridge-level timer is only refreshed in TEMP_QUERY mode.  A port is
 * skipped when its router mode is pinned (DISABLED or PERM).
 * Callers are expected to hold br->multicast_lock.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* inactive -> active transition: notify switchdev */
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1201
/* React to a general query seen on @port: if its sender wins (or ties)
 * the querier election, refresh the other-querier timer and mark the
 * ingress port as leading to a multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (br_multicast_select_querier(br, port, saddr)) {
		br_multicast_update_query_timer(br, query, max_delay);
		br_multicast_mark_router(br, port);
	}
}
1214
/* Handle an incoming IGMP membership query.
 * The IGMP version is inferred from the transport length: v1/v2 are
 * header-sized, v3 carries an igmpv3_query.  A general query (group 0)
 * feeds the querier election; a group-specific query shortens the
 * matching MDB entry's timers to last_member_count * max_delay so the
 * group expires quickly unless refreshed by a report.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2; a v1 query has code 0 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;	/* v1 queries are always general */
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;	/* source-specific query: ignore */

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: run the querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* only shorten timers, never extend them */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1290
1291#if IS_ENABLED(CONFIG_IPV6)
/* Handle an incoming MLD query (v1 or v2, inferred from length).
 * Mirrors br_ip4_multicast_query(): a general query (:: group) feeds
 * the querier election, a group-specific query shortens the matching
 * MDB entry's timers.  Returns 0 or -EINVAL on a truncated packet.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* only shorten timers, never extend them */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
1374#endif
1375
/* Common IGMP/MLD leave handling.
 * With BR_MULTICAST_FAST_LEAVE on the port, the matching non-permanent
 * port group is removed immediately.  Otherwise, membership timers are
 * shortened to last_member_count * last_member_interval so the group
 * expires soon unless a report refreshes it; if this bridge is the
 * querier it also sends a group-specific query first.  Nothing is done
 * while another querier is active (other_query timer pending).
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			/* permanent (static) entries are never fast-left */
			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			kfree_rcu(p, rcu);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags | MDB_PG_FLAGS_FAST_LEAVE);

			/* last port gone and no host join: expire entry now */
			if (!mp->ports && !mp->host_joined &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* defer to the active foreign querier */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host (bridge device) leave */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1485
1486static void br_ip4_multicast_leave_group(struct net_bridge *br,
1487					 struct net_bridge_port *port,
1488					 __be32 group,
1489					 __u16 vid,
1490					 const unsigned char *src)
1491{
1492	struct br_ip br_group;
1493	struct bridge_mcast_own_query *own_query;
1494
1495	if (ipv4_is_local_multicast(group))
1496		return;
1497
1498	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1499
1500	memset(&br_group, 0, sizeof(br_group));
1501	br_group.u.ip4 = group;
1502	br_group.proto = htons(ETH_P_IP);
1503	br_group.vid = vid;
1504
1505	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
 
1506				 own_query, src);
1507}
1508
1509#if IS_ENABLED(CONFIG_IPV6)
1510static void br_ip6_multicast_leave_group(struct net_bridge *br,
1511					 struct net_bridge_port *port,
1512					 const struct in6_addr *group,
1513					 __u16 vid,
1514					 const unsigned char *src)
1515{
1516	struct br_ip br_group;
1517	struct bridge_mcast_own_query *own_query;
1518
1519	if (ipv6_addr_is_ll_all_nodes(group))
1520		return;
1521
1522	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1523
1524	memset(&br_group, 0, sizeof(br_group));
1525	br_group.u.ip6 = *group;
1526	br_group.proto = htons(ETH_P_IPV6);
1527	br_group.vid = vid;
1528
1529	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
 
1530				 own_query, src);
1531}
1532#endif
1533
/* Bump the per-cpu IGMP/MLD parse-error counter for @p (or the bridge
 * itself when @p is NULL), keyed by ethertype.  No-op unless multicast
 * stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}
1566
1567static void br_multicast_pim(struct net_bridge *br,
1568			     struct net_bridge_port *port,
1569			     const struct sk_buff *skb)
1570{
1571	unsigned int offset = skb_transport_offset(skb);
1572	struct pimhdr *pimhdr, _pimhdr;
1573
1574	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1575	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1576	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1577		return;
1578
1579	br_multicast_mark_router(br, port);
 
 
1580}
1581
1582static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1583				    struct net_bridge_port *port,
1584				    struct sk_buff *skb)
1585{
1586	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1587	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1588		return -ENOMSG;
1589
1590	br_multicast_mark_router(br, port);
 
 
1591
1592	return 0;
1593}
1594
/* IPv4 multicast snooping entry point.
 * ip_mc_check_igmp() validates the packet: -ENOMSG means "valid but
 * not IGMP" (still inspected for PIM hello / MRD advertisements and
 * flagged mrouters_only when not link-local), other negatives are
 * parse errors.  Valid IGMP is dispatched by message type.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* reports are only forwarded towards routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1648
1649#if IS_ENABLED(CONFIG_IPV6)
1650static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1651				    struct net_bridge_port *port,
1652				    struct sk_buff *skb)
1653{
1654	int ret;
1655
1656	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1657		return -ENOMSG;
1658
1659	ret = ipv6_mc_check_icmpv6(skb);
1660	if (ret < 0)
1661		return ret;
1662
1663	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1664		return -ENOMSG;
1665
1666	br_multicast_mark_router(br, port);
1667
1668	return 0;
 
 
1669}
1670
/* IPv6 multicast snooping entry point.
 * ipv6_mc_check_mld() validates the packet: -ENOMSG means "valid but
 * not MLD" (still inspected for MRD advertisements and flagged
 * mrouters_only unless it targets the all-nodes group), other
 * negatives are parse errors.  Valid MLD is dispatched by type.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* reports are only forwarded towards routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1728#endif
1729
1730int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 
 
1731		     struct sk_buff *skb, u16 vid)
1732{
1733	int ret = 0;
1734
1735	BR_INPUT_SKB_CB(skb)->igmp = 0;
1736	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1737
1738	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1739		return 0;
1740
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1741	switch (skb->protocol) {
1742	case htons(ETH_P_IP):
1743		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1744		break;
1745#if IS_ENABLED(CONFIG_IPV6)
1746	case htons(ETH_P_IPV6):
1747		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1748		break;
1749#endif
1750	}
1751
1752	return ret;
1753}
1754
/* Common body for the own-query timers: advance the startup-query
 * counter (queries are sent at the faster startup cadence until the
 * count is reached), drop the recorded querier port and send the next
 * general query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
1767
/* IGMP own-query timer callback. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
1774
1775#if IS_ENABLED(CONFIG_IPV6)
/* MLD own-query timer callback. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
1782#endif
1783
/* Initialize the bridge's multicast snooping state: default knobs and
 * timing, timers, and the (empty) MDB list.  Called at bridge setup.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* default protocol timing; values mirror the common IGMP/MLD
	 * defaults (125s query interval, 10s max response time, etc.)
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* snooping on, own querier off by default */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
}
1825
/* Join 224.0.0.106 (all-snoopers) on the bridge device so MRD
 * advertisements are delivered to us.  GFP_ATOMIC: may run under
 * br->multicast_lock.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
1836
#if IS_ENABLED(CONFIG_IPV6)
/* Join ff02::6a (all-snoopers) on the bridge device so MRD
 * advertisements are delivered to us.
 */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* !CONFIG_IPV6 stub */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
1850
/* Join the IPv4 and IPv6 all-snoopers groups (MRD listeners). */
static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
1856
/* Drop our 224.0.0.106 (all-snoopers) membership; counterpart of
 * br_ip4_multicast_join_snoopers().
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
1867
#if IS_ENABLED(CONFIG_IPV6)
/* Drop our ff02::6a (all-snoopers) membership; counterpart of
 * br_ip6_multicast_join_snoopers().
 */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* !CONFIG_IPV6 stub */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
1881
/* Leave the IPv4 and IPv6 all-snoopers groups. */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
1887
/* (Re)start an own-query cycle: reset the startup counter and fire the
 * query timer immediately, if snooping is enabled.
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}
1898
 
 
 
 
 
 
 
 
/* Bring multicast snooping up when the bridge device is opened: join
 * the all-snoopers groups (when snooping is enabled) and start the
 * IGMP/MLD own-query cycles.
 */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
1909
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Quiesce multicast snooping when the bridge device is stopped:
 * synchronously stop all timers and leave the all-snoopers groups.
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}
1923
/* Tear down the MDB on bridge deletion: remove and free every entry,
 * then wait for in-flight kfree_rcu() callbacks before the caller
 * frees the bridge.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		kfree_rcu(mp, rcu);
	}
	spin_unlock_bh(&br->multicast_lock);

	/* flush pending kfree_rcu() of MDB entries */
	rcu_barrier();
}
1941
/* Set the bridge device's own multicast-router mode (netlink/sysfs).
 * DISABLED/PERM pin the state and stop the timer; TEMP_QUERY returns
 * to query-driven detection.  Returns -EINVAL for unsupported modes
 * (per-port-only TEMP is rejected here).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
1968
/* Remove @p from the router list (if present), notifying userspace and
 * switchdev.  Called with br->multicast_lock held.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
1981
/* Set a port's multicast-router mode (netlink/sysfs).
 * DISABLED/TEMP_QUERY drop the port from the router list; PERM adds it
 * unconditionally with no timer; TEMP adds it with the detection timer
 * running.  Re-setting TEMP refreshes the timer.  Returns -EINVAL for
 * unknown modes.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		/* mark_router also adds the port and arms its timer */
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2025
/* Kick off a fresh own-query cycle on the bridge and on every active
 * port for the given address family (@query selects IPv4 vs IPv6).
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip ports that cannot forward */
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
2048
/* Enable/disable multicast snooping at runtime (netlink/sysfs).
 * Disabling leaves the all-snoopers groups; enabling (with the device
 * running) restarts queriers and re-enables every port.
 * Always returns 0.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	/* NOTE(review): br_multicast_open() -> join_snoopers() runs here
	 * under the multicast_lock spinlock; verify the IPv6 join path
	 * cannot sleep in this context.
	 */
	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2076
2077bool br_multicast_enabled(const struct net_device *dev)
2078{
2079	struct net_bridge *br = netdev_priv(dev);
2080
2081	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2082}
2083EXPORT_SYMBOL_GPL(br_multicast_enabled);
2084
2085bool br_multicast_router(const struct net_device *dev)
2086{
2087	struct net_bridge *br = netdev_priv(dev);
2088	bool is_router;
2089
2090	spin_lock_bh(&br->multicast_lock);
2091	is_router = br_multicast_is_router(br);
2092	spin_unlock_bh(&br->multicast_lock);
2093	return is_router;
2094}
2095EXPORT_SYMBOL_GPL(br_multicast_router);
2096
2097int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2098{
2099	unsigned long max_delay;
2100
2101	val = !!val;
2102
2103	spin_lock_bh(&br->multicast_lock);
2104	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
2105		goto unlock;
2106
2107	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
2108	if (!val)
2109		goto unlock;
2110
2111	max_delay = br->multicast_query_response_interval;
2112
2113	if (!timer_pending(&br->ip4_other_query.timer))
2114		br->ip4_other_query.delay_time = jiffies + max_delay;
2115
2116	br_multicast_start_querier(br, &br->ip4_own_query);
2117
2118#if IS_ENABLED(CONFIG_IPV6)
2119	if (!timer_pending(&br->ip6_other_query.timer))
2120		br->ip6_other_query.delay_time = jiffies + max_delay;
2121
2122	br_multicast_start_querier(br, &br->ip6_own_query);
2123#endif
2124
2125unlock:
2126	spin_unlock_bh(&br->multicast_lock);
2127
2128	return 0;
2129}
2130
2131int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
 
2132{
2133	/* Currently we support only version 2 and 3 */
2134	switch (val) {
2135	case 2:
2136	case 3:
2137		break;
2138	default:
2139		return -EINVAL;
2140	}
2141
2142	spin_lock_bh(&br->multicast_lock);
2143	br->multicast_igmp_version = val;
2144	spin_unlock_bh(&br->multicast_lock);
2145
2146	return 0;
2147}
2148
2149#if IS_ENABLED(CONFIG_IPV6)
2150int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
 
2151{
2152	/* Currently we support version 1 and 2 */
2153	switch (val) {
2154	case 1:
2155	case 2:
2156		break;
2157	default:
2158		return -EINVAL;
2159	}
2160
2161	spin_lock_bh(&br->multicast_lock);
2162	br->multicast_mld_version = val;
2163	spin_unlock_bh(&br->multicast_lock);
2164
2165	return 0;
2166}
2167#endif
2168
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2169/**
2170 * br_multicast_list_adjacent - Returns snooped multicast addresses
2171 * @dev:	The bridge port adjacent to which to retrieve addresses
2172 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
2173 *
2174 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2175 * snooping feature on all bridge ports of dev's bridge device, excluding
2176 * the addresses from dev itself.
2177 *
2178 * Returns the number of items added to br_ip_list.
2179 *
2180 * Notes:
2181 * - br_ip_list needs to be initialized by caller
2182 * - br_ip_list might contain duplicates in the end
2183 *   (needs to be taken care of by caller)
2184 * - br_ip_list needs to be freed by caller
2185 */
2186int br_multicast_list_adjacent(struct net_device *dev,
2187			       struct list_head *br_ip_list)
2188{
2189	struct net_bridge *br;
2190	struct net_bridge_port *port;
2191	struct net_bridge_port_group *group;
2192	struct br_ip_list *entry;
2193	int count = 0;
2194
2195	rcu_read_lock();
2196	if (!br_ip_list || !netif_is_bridge_port(dev))
2197		goto unlock;
2198
2199	port = br_port_get_rcu(dev);
2200	if (!port || !port->br)
2201		goto unlock;
2202
2203	br = port->br;
2204
2205	list_for_each_entry_rcu(port, &br->port_list, list) {
2206		if (!port->dev || port->dev == dev)
2207			continue;
2208
2209		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2210			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2211			if (!entry)
2212				goto unlock;
2213
2214			entry->addr = group->addr;
2215			list_add(&entry->list, br_ip_list);
2216			count++;
2217		}
2218	}
2219
2220unlock:
2221	rcu_read_unlock();
2222	return count;
2223}
2224EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2225
2226/**
2227 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2228 * @dev: The bridge port providing the bridge on which to check for a querier
2229 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2230 *
2231 * Checks whether the given interface has a bridge on top and if so returns
2232 * true if a valid querier exists anywhere on the bridged link layer.
2233 * Otherwise returns false.
2234 */
2235bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2236{
2237	struct net_bridge *br;
2238	struct net_bridge_port *port;
2239	struct ethhdr eth;
2240	bool ret = false;
2241
2242	rcu_read_lock();
2243	if (!netif_is_bridge_port(dev))
2244		goto unlock;
2245
2246	port = br_port_get_rcu(dev);
2247	if (!port || !port->br)
2248		goto unlock;
2249
2250	br = port->br;
2251
2252	memset(&eth, 0, sizeof(eth));
2253	eth.h_proto = htons(proto);
2254
2255	ret = br_multicast_querier_exists(br, &eth);
2256
2257unlock:
2258	rcu_read_unlock();
2259	return ret;
2260}
2261EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2262
2263/**
2264 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2265 * @dev: The bridge port adjacent to which to check for a querier
2266 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2267 *
2268 * Checks whether the given interface has a bridge on top and if so returns
2269 * true if a selected querier is behind one of the other ports of this
2270 * bridge. Otherwise returns false.
2271 */
2272bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2273{
 
2274	struct net_bridge *br;
2275	struct net_bridge_port *port;
2276	bool ret = false;
 
2277
2278	rcu_read_lock();
2279	if (!netif_is_bridge_port(dev))
2280		goto unlock;
2281
2282	port = br_port_get_rcu(dev);
2283	if (!port || !port->br)
2284		goto unlock;
2285
2286	br = port->br;
 
2287
2288	switch (proto) {
2289	case ETH_P_IP:
2290		if (!timer_pending(&br->ip4_other_query.timer) ||
2291		    rcu_dereference(br->ip4_querier.port) == port)
 
2292			goto unlock;
2293		break;
2294#if IS_ENABLED(CONFIG_IPV6)
2295	case ETH_P_IPV6:
2296		if (!timer_pending(&br->ip6_other_query.timer) ||
2297		    rcu_dereference(br->ip6_querier.port) == port)
 
2298			goto unlock;
2299		break;
2300#endif
2301	default:
2302		goto unlock;
2303	}
2304
2305	ret = true;
2306unlock:
2307	rcu_read_unlock();
2308	return ret;
2309}
2310EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2311
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2312static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2313			       const struct sk_buff *skb, u8 type, u8 dir)
2314{
2315	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2316	__be16 proto = skb->protocol;
2317	unsigned int t_len;
2318
2319	u64_stats_update_begin(&pstats->syncp);
2320	switch (proto) {
2321	case htons(ETH_P_IP):
2322		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2323		switch (type) {
2324		case IGMP_HOST_MEMBERSHIP_REPORT:
2325			pstats->mstats.igmp_v1reports[dir]++;
2326			break;
2327		case IGMPV2_HOST_MEMBERSHIP_REPORT:
2328			pstats->mstats.igmp_v2reports[dir]++;
2329			break;
2330		case IGMPV3_HOST_MEMBERSHIP_REPORT:
2331			pstats->mstats.igmp_v3reports[dir]++;
2332			break;
2333		case IGMP_HOST_MEMBERSHIP_QUERY:
2334			if (t_len != sizeof(struct igmphdr)) {
2335				pstats->mstats.igmp_v3queries[dir]++;
2336			} else {
2337				unsigned int offset = skb_transport_offset(skb);
2338				struct igmphdr *ih, _ihdr;
2339
2340				ih = skb_header_pointer(skb, offset,
2341							sizeof(_ihdr), &_ihdr);
2342				if (!ih)
2343					break;
2344				if (!ih->code)
2345					pstats->mstats.igmp_v1queries[dir]++;
2346				else
2347					pstats->mstats.igmp_v2queries[dir]++;
2348			}
2349			break;
2350		case IGMP_HOST_LEAVE_MESSAGE:
2351			pstats->mstats.igmp_leaves[dir]++;
2352			break;
2353		}
2354		break;
2355#if IS_ENABLED(CONFIG_IPV6)
2356	case htons(ETH_P_IPV6):
2357		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2358			sizeof(struct ipv6hdr);
2359		t_len -= skb_network_header_len(skb);
2360		switch (type) {
2361		case ICMPV6_MGM_REPORT:
2362			pstats->mstats.mld_v1reports[dir]++;
2363			break;
2364		case ICMPV6_MLD2_REPORT:
2365			pstats->mstats.mld_v2reports[dir]++;
2366			break;
2367		case ICMPV6_MGM_QUERY:
2368			if (t_len != sizeof(struct mld_msg))
2369				pstats->mstats.mld_v2queries[dir]++;
2370			else
2371				pstats->mstats.mld_v1queries[dir]++;
2372			break;
2373		case ICMPV6_MGM_REDUCTION:
2374			pstats->mstats.mld_leaves[dir]++;
2375			break;
2376		}
2377		break;
2378#endif /* CONFIG_IPV6 */
2379	}
2380	u64_stats_update_end(&pstats->syncp);
2381}
2382
2383void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
 
2384			const struct sk_buff *skb, u8 type, u8 dir)
2385{
2386	struct bridge_mcast_stats __percpu *stats;
2387
2388	/* if multicast_disabled is true then igmp type can't be set */
2389	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2390		return;
2391
2392	if (p)
2393		stats = p->mcast_stats;
2394	else
2395		stats = br->mcast_stats;
2396	if (WARN_ON(!stats))
2397		return;
2398
2399	br_mcast_stats_add(stats, skb, type, dir);
2400}
2401
2402int br_multicast_init_stats(struct net_bridge *br)
2403{
2404	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2405	if (!br->mcast_stats)
2406		return -ENOMEM;
2407
2408	return 0;
2409}
2410
2411void br_multicast_uninit_stats(struct net_bridge *br)
2412{
2413	free_percpu(br->mcast_stats);
2414}
2415
2416static void mcast_stats_add_dir(u64 *dst, u64 *src)
 
2417{
2418	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2419	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2420}
2421
2422void br_multicast_get_stats(const struct net_bridge *br,
2423			    const struct net_bridge_port *p,
2424			    struct br_mcast_stats *dest)
2425{
2426	struct bridge_mcast_stats __percpu *stats;
2427	struct br_mcast_stats tdst;
2428	int i;
2429
2430	memset(dest, 0, sizeof(*dest));
2431	if (p)
2432		stats = p->mcast_stats;
2433	else
2434		stats = br->mcast_stats;
2435	if (WARN_ON(!stats))
2436		return;
2437
2438	memset(&tdst, 0, sizeof(tdst));
2439	for_each_possible_cpu(i) {
2440		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2441		struct br_mcast_stats temp;
2442		unsigned int start;
2443
2444		do {
2445			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2446			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2447		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2448
2449		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2450		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2451		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2452		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2453		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2454		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2455		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2456		tdst.igmp_parse_errors += temp.igmp_parse_errors;
2457
2458		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2459		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2460		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2461		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2462		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2463		tdst.mld_parse_errors += temp.mld_parse_errors;
2464	}
2465	memcpy(dest, &tdst, sizeof(*dest));
2466}
2467
2468int br_mdb_hash_init(struct net_bridge *br)
2469{
2470	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
 
 
 
 
 
 
 
 
 
 
 
 
2471}
2472
2473void br_mdb_hash_fini(struct net_bridge *br)
2474{
 
2475	rhashtable_destroy(&br->mdb_hash_tbl);
2476}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Bridge multicast support.
   4 *
   5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
   6 */
   7
   8#include <linux/err.h>
   9#include <linux/export.h>
  10#include <linux/if_ether.h>
  11#include <linux/igmp.h>
  12#include <linux/in.h>
  13#include <linux/jhash.h>
  14#include <linux/kernel.h>
  15#include <linux/log2.h>
  16#include <linux/netdevice.h>
  17#include <linux/netfilter_bridge.h>
  18#include <linux/random.h>
  19#include <linux/rculist.h>
  20#include <linux/skbuff.h>
  21#include <linux/slab.h>
  22#include <linux/timer.h>
  23#include <linux/inetdevice.h>
  24#include <linux/mroute.h>
  25#include <net/ip.h>
  26#include <net/switchdev.h>
  27#if IS_ENABLED(CONFIG_IPV6)
  28#include <linux/icmpv6.h>
  29#include <net/ipv6.h>
  30#include <net/mld.h>
  31#include <net/ip6_checksum.h>
  32#include <net/addrconf.h>
  33#endif
  34
  35#include "br_private.h"
  36#include "br_private_mcast_eht.h"
  37
  38static const struct rhashtable_params br_mdb_rht_params = {
  39	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
  40	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
  41	.key_len = sizeof(struct br_ip),
  42	.automatic_shrinking = true,
  43};
  44
  45static const struct rhashtable_params br_sg_port_rht_params = {
  46	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
  47	.key_offset = offsetof(struct net_bridge_port_group, key),
  48	.key_len = sizeof(struct net_bridge_port_group_sg_key),
  49	.automatic_shrinking = true,
  50};
  51
  52static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
  53				       struct bridge_mcast_own_query *query);
  54static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
  55					struct net_bridge_mcast_port *pmctx);
  56static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
  57					 struct net_bridge_mcast_port *pmctx,
  58					 __be32 group,
  59					 __u16 vid,
  60					 const unsigned char *src);
  61static void br_multicast_port_group_rexmit(struct timer_list *t);
  62
  63static void
  64br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
  65static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
  66					struct net_bridge_mcast_port *pmctx);
  67#if IS_ENABLED(CONFIG_IPV6)
  68static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
  69					 struct net_bridge_mcast_port *pmctx,
  70					 const struct in6_addr *group,
  71					 __u16 vid, const unsigned char *src);
  72#endif
  73static struct net_bridge_port_group *
  74__br_multicast_add_group(struct net_bridge_mcast *brmctx,
  75			 struct net_bridge_mcast_port *pmctx,
  76			 struct br_ip *group,
  77			 const unsigned char *src,
  78			 u8 filter_mode,
  79			 bool igmpv2_mldv1,
  80			 bool blocked);
  81static void br_multicast_find_del_pg(struct net_bridge *br,
  82				     struct net_bridge_port_group *pg);
  83static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
  84
  85static int br_mc_disabled_update(struct net_device *dev, bool value,
  86				 struct netlink_ext_ack *extack);
  87
  88static struct net_bridge_port_group *
  89br_sg_port_find(struct net_bridge *br,
  90		struct net_bridge_port_group_sg_key *sg_p)
  91{
  92	lockdep_assert_held_once(&br->multicast_lock);
  93
  94	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
  95				      br_sg_port_rht_params);
  96}
  97
  98static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
  99						      struct br_ip *dst)
 100{
 101	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
 102}
 103
 104struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
 105					   struct br_ip *dst)
 106{
 107	struct net_bridge_mdb_entry *ent;
 108
 109	lockdep_assert_held_once(&br->multicast_lock);
 110
 111	rcu_read_lock();
 112	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
 113	rcu_read_unlock();
 114
 115	return ent;
 116}
 117
 118static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
 119						   __be32 dst, __u16 vid)
 120{
 121	struct br_ip br_dst;
 122
 123	memset(&br_dst, 0, sizeof(br_dst));
 124	br_dst.dst.ip4 = dst;
 125	br_dst.proto = htons(ETH_P_IP);
 126	br_dst.vid = vid;
 127
 128	return br_mdb_ip_get(br, &br_dst);
 129}
 130
 131#if IS_ENABLED(CONFIG_IPV6)
 132static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
 133						   const struct in6_addr *dst,
 134						   __u16 vid)
 135{
 136	struct br_ip br_dst;
 137
 138	memset(&br_dst, 0, sizeof(br_dst));
 139	br_dst.dst.ip6 = *dst;
 140	br_dst.proto = htons(ETH_P_IPV6);
 141	br_dst.vid = vid;
 142
 143	return br_mdb_ip_get(br, &br_dst);
 144}
 145#endif
 146
 147struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
 148					struct sk_buff *skb, u16 vid)
 149{
 150	struct net_bridge *br = brmctx->br;
 151	struct br_ip ip;
 152
 153	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
 154	    br_multicast_ctx_vlan_global_disabled(brmctx))
 155		return NULL;
 156
 157	if (BR_INPUT_SKB_CB(skb)->igmp)
 158		return NULL;
 159
 160	memset(&ip, 0, sizeof(ip));
 161	ip.proto = skb->protocol;
 162	ip.vid = vid;
 163
 164	switch (skb->protocol) {
 165	case htons(ETH_P_IP):
 166		ip.dst.ip4 = ip_hdr(skb)->daddr;
 167		if (brmctx->multicast_igmp_version == 3) {
 168			struct net_bridge_mdb_entry *mdb;
 169
 170			ip.src.ip4 = ip_hdr(skb)->saddr;
 171			mdb = br_mdb_ip_get_rcu(br, &ip);
 172			if (mdb)
 173				return mdb;
 174			ip.src.ip4 = 0;
 175		}
 176		break;
 177#if IS_ENABLED(CONFIG_IPV6)
 178	case htons(ETH_P_IPV6):
 179		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
 180		if (brmctx->multicast_mld_version == 2) {
 181			struct net_bridge_mdb_entry *mdb;
 182
 183			ip.src.ip6 = ipv6_hdr(skb)->saddr;
 184			mdb = br_mdb_ip_get_rcu(br, &ip);
 185			if (mdb)
 186				return mdb;
 187			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
 188		}
 189		break;
 190#endif
 191	default:
 192		ip.proto = 0;
 193		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
 194	}
 195
 196	return br_mdb_ip_get_rcu(br, &ip);
 197}
 198
 199/* IMPORTANT: this function must be used only when the contexts cannot be
 200 * passed down (e.g. timer) and must be used for read-only purposes because
 201 * the vlan snooping option can change, so it can return any context
 202 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 203 * from the *current* context based on the option. At worst that could lead
 204 * to inconsistent timers when the contexts are changed, i.e. src timer
 205 * which needs to re-arm with a specific delay taken from the old context
 206 */
 207static struct net_bridge_mcast_port *
 208br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
 209{
 210	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
 211	struct net_bridge_vlan *vlan;
 212
 213	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);
 214
 215	/* if vlan snooping is disabled use the port's multicast context */
 216	if (!pg->key.addr.vid ||
 217	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
 218		goto out;
 219
 220	/* locking is tricky here, due to different rules for multicast and
 221	 * vlans we need to take rcu to find the vlan and make sure it has
 222	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
 223	 * multicast_lock which must be already held here, so the vlan's pmctx
 224	 * can safely be used on return
 225	 */
 226	rcu_read_lock();
 227	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
 228	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
 229		pmctx = &vlan->port_mcast_ctx;
 230	else
 231		pmctx = NULL;
 232	rcu_read_unlock();
 233out:
 234	return pmctx;
 235}
 236
 237/* when snooping we need to check if the contexts should be used
 238 * in the following order:
 239 * - if pmctx is non-NULL (port), check if it should be used
 240 * - if pmctx is NULL (bridge), check if brmctx should be used
 241 */
 242static bool
 243br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
 244			    const struct net_bridge_mcast_port *pmctx)
 245{
 246	if (!netif_running(brmctx->br->dev))
 247		return false;
 248
 249	if (pmctx)
 250		return !br_multicast_port_ctx_state_disabled(pmctx);
 251	else
 252		return !br_multicast_ctx_vlan_disabled(brmctx);
 253}
 254
 255static bool br_port_group_equal(struct net_bridge_port_group *p,
 256				struct net_bridge_port *port,
 257				const unsigned char *src)
 258{
 259	if (p->key.port != port)
 260		return false;
 261
 262	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
 263		return true;
 264
 265	return ether_addr_equal(src, p->eth_addr);
 266}
 267
 268static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
 269				struct net_bridge_port_group *pg,
 270				struct br_ip *sg_ip)
 271{
 272	struct net_bridge_port_group_sg_key sg_key;
 273	struct net_bridge_port_group *src_pg;
 274	struct net_bridge_mcast *brmctx;
 275
 276	memset(&sg_key, 0, sizeof(sg_key));
 277	brmctx = br_multicast_port_ctx_get_global(pmctx);
 278	sg_key.port = pg->key.port;
 279	sg_key.addr = *sg_ip;
 280	if (br_sg_port_find(brmctx->br, &sg_key))
 281		return;
 282
 283	src_pg = __br_multicast_add_group(brmctx, pmctx,
 284					  sg_ip, pg->eth_addr,
 285					  MCAST_INCLUDE, false, false);
 286	if (IS_ERR_OR_NULL(src_pg) ||
 287	    src_pg->rt_protocol != RTPROT_KERNEL)
 288		return;
 289
 290	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
 291}
 292
 293static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
 294				struct br_ip *sg_ip)
 295{
 296	struct net_bridge_port_group_sg_key sg_key;
 297	struct net_bridge *br = pg->key.port->br;
 298	struct net_bridge_port_group *src_pg;
 299
 300	memset(&sg_key, 0, sizeof(sg_key));
 301	sg_key.port = pg->key.port;
 302	sg_key.addr = *sg_ip;
 303	src_pg = br_sg_port_find(br, &sg_key);
 304	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
 305	    src_pg->rt_protocol != RTPROT_KERNEL)
 306		return;
 307
 308	br_multicast_find_del_pg(br, src_pg);
 309}
 310
 311/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 312 * to all other ports' S,G entries which are not blocked by the current group
 313 * for proper replication, the assumption is that any S,G blocked entries
 314 * are already added so the S,G,port lookup should skip them.
 315 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 316 * deleted we need to remove it from all ports' S,G entries where it was
 317 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 318 */
 319void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
 320				     u8 filter_mode)
 321{
 322	struct net_bridge *br = pg->key.port->br;
 323	struct net_bridge_port_group *pg_lst;
 324	struct net_bridge_mcast_port *pmctx;
 325	struct net_bridge_mdb_entry *mp;
 326	struct br_ip sg_ip;
 327
 328	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
 329		return;
 330
 331	mp = br_mdb_ip_get(br, &pg->key.addr);
 332	if (!mp)
 333		return;
 334	pmctx = br_multicast_pg_to_port_ctx(pg);
 335	if (!pmctx)
 336		return;
 337
 338	memset(&sg_ip, 0, sizeof(sg_ip));
 339	sg_ip = pg->key.addr;
 340
 341	for (pg_lst = mlock_dereference(mp->ports, br);
 342	     pg_lst;
 343	     pg_lst = mlock_dereference(pg_lst->next, br)) {
 344		struct net_bridge_group_src *src_ent;
 345
 346		if (pg_lst == pg)
 347			continue;
 348		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
 349			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
 350				continue;
 351			sg_ip.src = src_ent->addr.src;
 352			switch (filter_mode) {
 353			case MCAST_INCLUDE:
 354				__fwd_del_star_excl(pg, &sg_ip);
 355				break;
 356			case MCAST_EXCLUDE:
 357				__fwd_add_star_excl(pmctx, pg, &sg_ip);
 358				break;
 359			}
 360		}
 361	}
 362}
 363
 364/* called when adding a new S,G with host_joined == false by default */
 365static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
 366				       struct net_bridge_port_group *sg)
 367{
 368	struct net_bridge_mdb_entry *sg_mp;
 369
 370	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
 371		return;
 372	if (!star_mp->host_joined)
 373		return;
 374
 375	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
 376	if (!sg_mp)
 377		return;
 378	sg_mp->host_joined = true;
 379}
 380
 381/* set the host_joined state of all of *,G's S,G entries */
 382static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
 383{
 384	struct net_bridge *br = star_mp->br;
 385	struct net_bridge_mdb_entry *sg_mp;
 386	struct net_bridge_port_group *pg;
 387	struct br_ip sg_ip;
 388
 389	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
 390		return;
 391
 392	memset(&sg_ip, 0, sizeof(sg_ip));
 393	sg_ip = star_mp->addr;
 394	for (pg = mlock_dereference(star_mp->ports, br);
 395	     pg;
 396	     pg = mlock_dereference(pg->next, br)) {
 397		struct net_bridge_group_src *src_ent;
 398
 399		hlist_for_each_entry(src_ent, &pg->src_list, node) {
 400			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
 401				continue;
 402			sg_ip.src = src_ent->addr.src;
 403			sg_mp = br_mdb_ip_get(br, &sg_ip);
 404			if (!sg_mp)
 405				continue;
 406			sg_mp->host_joined = star_mp->host_joined;
 407		}
 408	}
 409}
 410
 411static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
 412{
 413	struct net_bridge_port_group __rcu **pp;
 414	struct net_bridge_port_group *p;
 415
 416	/* *,G exclude ports are only added to S,G entries */
 417	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
 418		return;
 419
 420	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
 421	 * we should ignore perm entries since they're managed by user-space
 422	 */
 423	for (pp = &sgmp->ports;
 424	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
 425	     pp = &p->next)
 426		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
 427				  MDB_PG_FLAGS_PERMANENT)))
 428			return;
 429
 430	/* currently the host can only have joined the *,G which means
 431	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
 432	 * STAR_EXCLUDE entry and we can safely leave it
 433	 */
 434	sgmp->host_joined = false;
 435
 436	for (pp = &sgmp->ports;
 437	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
 438		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
 439			br_multicast_del_pg(sgmp, p, pp);
 440		else
 441			pp = &p->next;
 442	}
 443}
 444
 445void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
 446				       struct net_bridge_port_group *sg)
 447{
 448	struct net_bridge_port_group_sg_key sg_key;
 449	struct net_bridge *br = star_mp->br;
 450	struct net_bridge_mcast_port *pmctx;
 451	struct net_bridge_port_group *pg;
 452	struct net_bridge_mcast *brmctx;
 453
 454	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
 455		return;
 456	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
 457		return;
 458
 459	br_multicast_sg_host_state(star_mp, sg);
 460	memset(&sg_key, 0, sizeof(sg_key));
 461	sg_key.addr = sg->key.addr;
 462	/* we need to add all exclude ports to the S,G */
 463	for (pg = mlock_dereference(star_mp->ports, br);
 464	     pg;
 465	     pg = mlock_dereference(pg->next, br)) {
 466		struct net_bridge_port_group *src_pg;
 467
 468		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
 469			continue;
 470
 471		sg_key.port = pg->key.port;
 472		if (br_sg_port_find(br, &sg_key))
 473			continue;
 474
 475		pmctx = br_multicast_pg_to_port_ctx(pg);
 476		if (!pmctx)
 477			continue;
 478		brmctx = br_multicast_port_ctx_get_global(pmctx);
 479
 480		src_pg = __br_multicast_add_group(brmctx, pmctx,
 481						  &sg->key.addr,
 482						  sg->eth_addr,
 483						  MCAST_INCLUDE, false, false);
 484		if (IS_ERR_OR_NULL(src_pg) ||
 485		    src_pg->rt_protocol != RTPROT_KERNEL)
 486			continue;
 487		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
 488	}
 489}
 490
 491static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
 492{
 493	struct net_bridge_mdb_entry *star_mp;
 494	struct net_bridge_mcast_port *pmctx;
 495	struct net_bridge_port_group *sg;
 496	struct net_bridge_mcast *brmctx;
 497	struct br_ip sg_ip;
 498
 499	if (src->flags & BR_SGRP_F_INSTALLED)
 500		return;
 501
 502	memset(&sg_ip, 0, sizeof(sg_ip));
 503	pmctx = br_multicast_pg_to_port_ctx(src->pg);
 504	if (!pmctx)
 505		return;
 506	brmctx = br_multicast_port_ctx_get_global(pmctx);
 507	sg_ip = src->pg->key.addr;
 508	sg_ip.src = src->addr.src;
 509
 510	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
 511				      src->pg->eth_addr, MCAST_INCLUDE, false,
 512				      !timer_pending(&src->timer));
 513	if (IS_ERR_OR_NULL(sg))
 514		return;
 515	src->flags |= BR_SGRP_F_INSTALLED;
 516	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
 517
 518	/* if it was added by user-space as perm we can skip next steps */
 519	if (sg->rt_protocol != RTPROT_KERNEL &&
 520	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
 521		return;
 522
 523	/* the kernel is now responsible for removing this S,G */
 524	del_timer(&sg->timer);
 525	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
 526	if (!star_mp)
 527		return;
 528
 529	br_multicast_sg_add_exclude_ports(star_mp, sg);
 530}
 531
 532static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
 533					bool fastleave)
 534{
 535	struct net_bridge_port_group *p, *pg = src->pg;
 536	struct net_bridge_port_group __rcu **pp;
 537	struct net_bridge_mdb_entry *mp;
 538	struct br_ip sg_ip;
 539
 540	memset(&sg_ip, 0, sizeof(sg_ip));
 541	sg_ip = pg->key.addr;
 542	sg_ip.src = src->addr.src;
 543
 544	mp = br_mdb_ip_get(src->br, &sg_ip);
 545	if (!mp)
 546		return;
 547
 548	for (pp = &mp->ports;
 549	     (p = mlock_dereference(*pp, src->br)) != NULL;
 550	     pp = &p->next) {
 551		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
 552			continue;
 553
 554		if (p->rt_protocol != RTPROT_KERNEL &&
 555		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
 556		    !(src->flags & BR_SGRP_F_USER_ADDED))
 557			break;
 558
 559		if (fastleave)
 560			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
 561		br_multicast_del_pg(mp, p, pp);
 562		break;
 563	}
 564	src->flags &= ~BR_SGRP_F_INSTALLED;
 565}
 566
 567/* install S,G and based on src's timer enable or disable forwarding */
 568static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
 569{
 570	struct net_bridge_port_group_sg_key sg_key;
 571	struct net_bridge_port_group *sg;
 572	u8 old_flags;
 573
 574	br_multicast_fwd_src_add(src);
 575
 576	memset(&sg_key, 0, sizeof(sg_key));
 577	sg_key.addr = src->pg->key.addr;
 578	sg_key.addr.src = src->addr.src;
 579	sg_key.port = src->pg->key.port;
 580
 581	sg = br_sg_port_find(src->br, &sg_key);
 582	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
 583		return;
 584
 585	old_flags = sg->flags;
 586	if (timer_pending(&src->timer))
 587		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
 588	else
 589		sg->flags |= MDB_PG_FLAGS_BLOCKED;
 590
 591	if (old_flags != sg->flags) {
 592		struct net_bridge_mdb_entry *sg_mp;
 593
 594		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
 595		if (!sg_mp)
 596			return;
 597		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
 598	}
 599}
 600
 601static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
 602{
 603	struct net_bridge_mdb_entry *mp;
 604
 605	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
 606	WARN_ON(!hlist_unhashed(&mp->mdb_node));
 607	WARN_ON(mp->ports);
 608
 609	timer_shutdown_sync(&mp->timer);
 610	kfree_rcu(mp, rcu);
 611}
 612
/* Unlink @mp from the bridge's mdb hash table and list and queue it on
 * the GC list for deferred destruction (freed after an RCU grace period
 * by the mcast GC work item).  Called under br->multicast_lock by the
 * group expiry path.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
 623
/* Timer callback: the membership interval for mdb entry @mp elapsed.
 * Drops the host-joined state and, if no port groups remain, unlinks
 * the entry.  Bails out if the entry was already unlinked, the bridge
 * is down, or the timer was re-armed in the meantime.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	/* ports still joined: keep the entry alive */
	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
 642
 643static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
 644{
 645	struct net_bridge_group_src *src;
 646
 647	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
 648	WARN_ON(!hlist_unhashed(&src->node));
 649
 650	timer_shutdown_sync(&src->timer);
 651	kfree_rcu(src, rcu);
 652}
 653
/* Unlink source entry @src from its port group and queue it on the GC
 * list for deferred destruction.  Does not touch the forwarding state;
 * use br_multicast_del_group_src() for the full removal.
 */
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
 663
/* Remove source entry @src: first tear down its S,G forwarding entry
 * (@fastleave marks the S,G port group for fast-leave), then unlink and
 * queue the source itself for destruction.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}
 670
 671static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
 672{
 673	struct net_bridge_port_group *pg;
 674
 675	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
 676	WARN_ON(!hlist_unhashed(&pg->mglist));
 677	WARN_ON(!hlist_empty(&pg->src_list));
 678
 679	timer_shutdown_sync(&pg->rexmit_timer);
 680	timer_shutdown_sync(&pg->timer);
 681	kfree_rcu(pg, rcu);
 682}
 683
/* Remove port group @pg from mdb entry @mp.  @pp is the list link that
 * points at @pg.  Drops all of @pg's sources, sends RTM_DELMDB and
 * queues @pg for deferred freeing.  If no port and no host join remain,
 * the group timer is fired immediately to clean up the mdb entry.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	/* unlink from the mdb entry's RCU port list first so new
	 * readers can no longer find @pg
	 */
	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entries also live in the per-bridge S,G port hash */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* last reference to the group gone: expire the mdb entry now */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
 711
 712static void br_multicast_find_del_pg(struct net_bridge *br,
 713				     struct net_bridge_port_group *pg)
 714{
 715	struct net_bridge_port_group __rcu **pp;
 716	struct net_bridge_mdb_entry *mp;
 717	struct net_bridge_port_group *p;
 
 718
 719	mp = br_mdb_ip_get(br, &pg->key.addr);
 720	if (WARN_ON(!mp))
 721		return;
 722
 723	for (pp = &mp->ports;
 724	     (p = mlock_dereference(*pp, br)) != NULL;
 725	     pp = &p->next) {
 726		if (p != pg)
 727			continue;
 728
 729		br_multicast_del_pg(mp, pg, pp);
 
 
 
 
 
 
 
 
 
 
 730		return;
 731	}
 732
 733	WARN_ON(1);
 734}
 735
 736static void br_multicast_port_group_expired(struct timer_list *t)
 737{
 738	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
 739	struct net_bridge_group_src *src_ent;
 740	struct net_bridge *br = pg->key.port->br;
 741	struct hlist_node *tmp;
 742	bool changed;
 743
 744	spin_lock(&br->multicast_lock);
 745	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
 746	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
 747		goto out;
 748
 749	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
 750	pg->filter_mode = MCAST_INCLUDE;
 751	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
 752		if (!timer_pending(&src_ent->timer)) {
 753			br_multicast_del_group_src(src_ent, false);
 754			changed = true;
 755		}
 756	}
 757
 758	if (hlist_empty(&pg->src_list)) {
 759		br_multicast_find_del_pg(br, pg);
 760	} else if (changed) {
 761		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
 762
 763		if (changed && br_multicast_is_star_g(&pg->key.addr))
 764			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
 765
 766		if (WARN_ON(!mp))
 767			goto out;
 768		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
 769	}
 770out:
 771	spin_unlock(&br->multicast_lock);
 772}
 773
 774static void br_multicast_gc(struct hlist_head *head)
 775{
 776	struct net_bridge_mcast_gc *gcent;
 777	struct hlist_node *tmp;
 778
 779	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
 780		hlist_del_init(&gcent->gc_node);
 781		gcent->destroy(gcent);
 782	}
 783}
 784
 785static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
 786					     struct net_bridge_mcast_port *pmctx,
 787					     struct sk_buff *skb)
 788{
 789	struct net_bridge_vlan *vlan = NULL;
 790
 791	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
 792		vlan = pmctx->vlan;
 793	else if (br_multicast_ctx_is_vlan(brmctx))
 794		vlan = brmctx->vlan;
 795
 796	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
 797		u16 vlan_proto;
 798
 799		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
 800			return;
 801		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
 802	}
 803}
 804
/* Build an IGMP query skb to be sent by the bridge.
 * For IGMPv2 a plain general/group-specific query is built; for IGMPv3
 * a group-and-source-specific query can be built (@with_srcs) holding
 * only the sources on the requested side of the last-member-query time
 * (@over_lmqt) that still have retransmissions pending.  @sflag becomes
 * the v3 "suppress router-side processing" bit.  Returns NULL when no
 * query is needed, the packet would exceed the MTU, or on a source
 * count mismatch; the allocated skb otherwise (NULL on alloc failure).
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* count sources on the requested side of the LMQT
			 * that still have query retransmissions left
			 */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ip header incl. 4 bytes of Router Alert option + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	/* tag the skb when built in a tagged-vlan snooping context */
	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	/* IPv4 header + Router Alert option */
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 20-byte header + 4-byte Router Alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* sizeof(*iph) + 4 option bytes (ihl == 6) */

	/* IGMP header */
	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* max resp time: LMQI for group queries, QRI otherwise */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill in the sources counted above, consuming one
		 * retransmission credit per included source
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only IGMP versions 2 and 3 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
 950
 951#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb to be sent by the bridge (IPv6 counterpart of
 * br_ip4_multicast_alloc_query).  For MLDv1 a plain query is built; for
 * MLDv2 a group-and-source-specific query can be built (@with_srcs)
 * holding only sources on the requested side of the last-listener
 * query time (@over_llqt) that still have retransmissions pending.
 * Returns NULL when no query is needed, the packet would exceed the
 * MTU, no usable IPv6 source address exists, or on alloc failure.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* count sources on the requested side of the LLQT
			 * that still have query retransmissions left
			 */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + ipv6 header + 8-byte hop-by-hop option + mld */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	/* tag the skb when built in a tagged-vlan snooping context */
	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* without a usable link-local source there is nothing to send;
	 * remember that state in BROPT_HAS_IPV6_ADDR
	 */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* max resp delay: QRI for general queries, LLQI otherwise */
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* fill in the sources counted above, consuming one
		 * retransmission credit per included source
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only MLD versions 1 and 2 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
1115#endif
1116
/* Protocol dispatcher for query allocation: builds an IGMP or MLD query
 * depending on @group->proto.  When @ip_dst is NULL the all-hosts
 * (224.0.0.1) / all-nodes (ff02::1) destination is used.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));	/* ff02::1 */

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	/* unknown protocol (or IPv6 with CONFIG_IPV6 disabled) */
	return NULL;
}
1156
/* Find or create the mdb entry for @group.  When the mdb hash table has
 * reached hash_max, multicast snooping is disabled for the whole bridge
 * and -E2BIG is returned.  Returns the (possibly pre-existing) entry or
 * an ERR_PTR on failure.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		/* table full: turn snooping off rather than drop groups */
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
1192
/* Timer callback: the source timer of @src expired.  In INCLUDE mode
 * the source is removed (and the whole port group if it was the last
 * source); in EXCLUDE mode the S,G forwarding state is re-evaluated,
 * which will block traffic from this now-expired source.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1217
1218struct net_bridge_group_src *
1219br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1220{
1221	struct net_bridge_group_src *ent;
1222
1223	switch (ip->proto) {
1224	case htons(ETH_P_IP):
1225		hlist_for_each_entry(ent, &pg->src_list, node)
1226			if (ip->src.ip4 == ent->addr.src.ip4)
1227				return ent;
1228		break;
1229#if IS_ENABLED(CONFIG_IPV6)
1230	case htons(ETH_P_IPV6):
1231		hlist_for_each_entry(ent, &pg->src_list, node)
1232			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1233				return ent;
1234		break;
1235#endif
1236	}
1237
1238	return NULL;
1239}
1240
/* Allocate and link a new source entry for @src_ip under port group
 * @pg.  Returns NULL when the per-group source limit is reached, the
 * address is invalid as a source (zeronet/any or multicast), or on
 * allocation failure.  Protocols other than IPv4/IPv6 are not
 * validated and proceed straight to allocation.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1279
/* Allocate a new port group for @group on @port and link it into the
 * port's mglist.  @next becomes the group's successor in the mdb
 * entry's port list (the caller links the new entry in).  A NULL @src
 * means "match any source MAC" (broadcast filter address).  S,G groups
 * are additionally inserted into the per-bridge S,G port hash; returns
 * NULL on allocation or hash-insert failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
1324
/* Mark @mp as joined by the bridge (host) itself, optionally sending an
 * RTM_NEWMDB notification on the 0->1 transition, and re-arm the group
 * membership timer.  L2 (non-IP) groups have no timer-based expiry and
 * skip the re-arm.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		/* host join can change *,G exclude-port handling */
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}
1341
1342void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1343{
1344	if (!mp->host_joined)
1345		return;
1346
1347	mp->host_joined = false;
1348	if (br_multicast_is_star_g(&mp->addr))
1349		br_multicast_star_g_host_state(mp);
1350	if (notify)
1351		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1352}
1353
/* Core join handling: find/create the mdb entry for @group and either
 * record a host join (no @pmctx) or find/create the port group for
 * @pmctx's port.  The port list is kept sorted by port pointer, so the
 * scan stops at the first smaller pointer.  For IGMPv2/MLDv1 joins the
 * membership timer is (re)armed.  Returns the port group, NULL for
 * host joins / unused contexts, or an ERR_PTR on failure.  Called with
 * br->multicast_lock held.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		/* join by the bridge device itself */
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* v2/v1 joins carry no source lists; just refresh the timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1408
/* Locked wrapper around __br_multicast_add_group() that converts the
 * returned pointer into an errno.  A NULL port group (host join or
 * unused context) counts as success.
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}
1428
1429static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
1430				      struct net_bridge_mcast_port *pmctx,
1431				      __be32 group,
1432				      __u16 vid,
1433				      const unsigned char *src,
1434				      bool igmpv2)
1435{
1436	struct br_ip br_group;
1437	u8 filter_mode;
1438
1439	if (ipv4_is_local_multicast(group))
1440		return 0;
1441
1442	memset(&br_group, 0, sizeof(br_group));
1443	br_group.dst.ip4 = group;
1444	br_group.proto = htons(ETH_P_IP);
1445	br_group.vid = vid;
1446	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1447
1448	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1449				      filter_mode, igmpv2);
1450}
1451
1452#if IS_ENABLED(CONFIG_IPV6)
1453static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1454				      struct net_bridge_mcast_port *pmctx,
1455				      const struct in6_addr *group,
1456				      __u16 vid,
1457				      const unsigned char *src,
1458				      bool mldv1)
1459{
1460	struct br_ip br_group;
1461	u8 filter_mode;
1462
1463	if (ipv6_addr_is_ll_all_nodes(group))
1464		return 0;
1465
1466	memset(&br_group, 0, sizeof(br_group));
1467	br_group.dst.ip6 = *group;
1468	br_group.proto = htons(ETH_P_IPV6);
1469	br_group.vid = vid;
1470	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1471
1472	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1473				      filter_mode, mldv1);
1474}
1475#endif
1476
1477static bool br_multicast_rport_del(struct hlist_node *rlist)
1478{
1479	if (hlist_unhashed(rlist))
1480		return false;
1481
1482	hlist_del_init_rcu(rlist);
1483	return true;
1484}
1485
/* Remove @pmctx from the IPv4 router port list; true if it was listed. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1490
/* Remove @pmctx from the IPv6 router port list; true if it was listed.
 * Always false when IPv6 support is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1499
/* Common handler for a per-port multicast router timer expiry: drop the
 * port from the router list (@rlist) unless the router type is fixed by
 * configuration (disabled/permanent) or the timer was re-armed.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
1518
/* Timer callback: IPv4 multicast router timer of a port expired. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1526
1527#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: IPv6 multicast router timer of a port expired. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
1535#endif
1536
/* Propagate the bridge's own "is a multicast router" state to offloaded
 * switch hardware via a deferred switchdev attribute set.
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
1549
/* Common handler for the bridge's own (local) router timer expiry:
 * tell switchdev we are no longer a router, unless the router type is
 * fixed by configuration or the other address family's timer still
 * marks us as one.  @timer is currently unused.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1564
/* Timer callback: the bridge's own IPv4 router timer expired. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1572
1573#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the bridge's own IPv6 router timer expired. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1581#endif
1582
/* Common handler for an "other querier present" timer expiry: the
 * foreign querier went away, so start sending our own queries (unless
 * the bridge is down or multicast snooping is disabled).
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1597
/* Timer callback: the foreign IPv4 querier's presence timed out. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1605
1606#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the foreign IPv6 querier's presence timed out. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
1614#endif
1615
/* Record the source address of our own query @skb as the currently
 * selected querier address for the matching address family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1627
/* Build and emit one query.  With @pmctx set, the query is transmitted
 * out of that port; otherwise it is looped back into the bridge as our
 * own query (and we record ourselves as querier).  When sending with
 * the suppress flag (@sflag), the over-LMQT sources are sent first and
 * then a second query is built for the under-LMQT sources.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* suppressed queries target the over-LMQT sources first */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* follow up with a query for the under-LMQT sources */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1671
/* Take a consistent snapshot of @querier into @dest using its seqcount,
 * retrying if a writer updated the querier concurrently.
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1684
/* Publish a new elected querier (port ifindex + address) under the
 * seqcount write side so br_multicast_read_querier() never observes a
 * torn pair.  NOTE(review): writers appear to be serialized by
 * br->multicast_lock (all visible callers run under it) — confirm.
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1695
/* Send a general query for the address family selected by @own_query and
 * re-arm its timer (startup interval until the startup count is reached,
 * then the regular query interval).  Nothing is sent while querying is
 * disabled or while another querier is active (its other_query timer is
 * still pending).
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	/* general query: destination group is zero */
	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* other_query stays NULL here only when IPv6 is compiled out */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1744
/* Common handler for a port context's own-query timers: under the
 * bridge multicast lock, bump the startup query counter and (re)send a
 * general query unless the port context has been stopped.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))

		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}
1765
/* Timer callback: the port context's IPv4 own general query is due. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1773
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the port context's IPv6 own general query is due. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
1783
/* Timer callback for a port group's query retransmissions: while we are
 * querier and no other querier is active, retransmit a pending group
 * query (grp_query_rexmit_cnt) and a group-and-source query for this
 * pg, then re-arm the timer if more retransmissions remain.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* another querier is active - it handles retransmissions */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1829
1830static int br_mc_disabled_update(struct net_device *dev, bool value,
1831				 struct netlink_ext_ack *extack)
1832{
1833	struct switchdev_attr attr = {
1834		.orig_dev = dev,
1835		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1836		.flags = SWITCHDEV_F_DEFER,
1837		.u.mc_disabled = !value,
1838	};
1839
1840	return switchdev_port_attr_set(dev, &attr, extack);
1841}
1842
/* Initialize a port multicast context (@vlan is NULL for the port's
 * global context): default router type plus the per-family multicast
 * router and own-query timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
1861
/* Synchronously stop the context's router timers (the own-query timers
 * are deleted in the port disable path, see
 * __br_multicast_disable_port_ctx()).
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}
1869
/* Set up multicast state for a newly added bridge port: initialize its
 * global multicast context, tell switchdev the current mcast-enabled
 * state (-EOPNOTSUPP from drivers is tolerated) and allocate the
 * per-cpu multicast statistics.  Returns 0 or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1890
/* Tear down multicast state when a port is removed: flush its remaining
 * port groups, run the pending gc entries outside the lock, deinit the
 * port context and free the per-cpu stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	/* move gc entries out so they can be freed without the lock held */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
1908
/* Restart an own-query cycle: reset the startup counter and fire the
 * timer immediately.  The try_to_del/del_timer dance avoids re-arming
 * a timer whose callback is running and could not be stopped.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1917
/* Kick off querying on a port context and, for ports configured as
 * permanent multicast routers, re-add them to the router lists.  No-op
 * while multicast is disabled or the bridge device is down.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}
1937
/* Locked wrapper: enable multicast on the port's global context. */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}
1946
/* Disable multicast on a port context: flush its non-permanent port
 * groups (restricted to the context's vlan for vlan contexts), stop all
 * router and own-query timers, drop the port from the router lists and
 * send a notification if it was removed.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{

	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
1969
/* Locked wrapper: disable multicast on the port's global context. */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}
1976
1977static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1978{
1979	struct net_bridge_group_src *ent;
1980	struct hlist_node *tmp;
1981	int deleted = 0;
1982
1983	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1984		if (ent->flags & BR_SGRP_F_DELETE) {
1985			br_multicast_del_group_src(ent, false);
1986			deleted++;
1987		}
1988
1989	return deleted;
1990}
1991
/* Re-arm a source entry's timer and let br_multicast_fwd_src_handle()
 * refresh the entry's forwarding state accordingly.
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
1998
/* For all sources of @pg marked BR_SGRP_F_SEND: lower their timers to
 * the last-member-query-time (LMQT) and, if we are the active querier,
 * charge them with retransmissions and send a group-and-source query,
 * arming pg's rexmit timer for the follow-ups.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* only lower timers that are above lmqt */
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	/* not querier, or another querier is active - don't send */
	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
2045
/* Send a group-specific query for @pg (if we are the active querier),
 * schedule its remaining retransmissions, and for EXCLUDE mode lower
 * the group timer to LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		/* this query counts, so one less retransmission is left */
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
2080
2081/* State          Msg type      New state                Actions
2082 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
2083 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
2084 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2085 */
/* Handle IS_IN/ALLOW records (see the state table above): add any new
 * sources and set each reported source's timer to GMI.  Returns true if
 * the set of forwarded sources changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2118
2119/* State          Msg type      New state                Actions
2120 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2121 *                                                       Delete (A-B)
2122 *                                                       Group Timer=GMI
2123 */
/* IS_EX record received while in INCLUDE mode (state table above):
 * keep/add the reported sources with timers left at 0 (blocked in the
 * new EXCLUDE mode) and delete all sources not in the report.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, reported sources get unmarked */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2154
2155/* State          Msg type      New state                Actions
2156 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
2157 *                                                       Delete (X-A)
2158 *                                                       Delete (Y-A)
2159 *                                                       Group Timer=GMI
2160 */
/* IS_EX record received while in EXCLUDE mode (state table above):
 * new sources (A-X-Y) get their timer set to GMI, sources not in the
 * report are deleted.  Returns true if the source set changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2201
/* Dispatch an IS_EX record based on the group's current filter mode,
 * then switch the group to EXCLUDE and set the group timer to GMI.
 * Returns true if the forwarding state changed.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2227
2228/* State          Msg type      New state                Actions
2229 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
2230 *                                                       Send Q(G,A-B)
2231 */
/* TO_IN record received while in INCLUDE mode (state table above):
 * refresh/add the reported sources with GMI timers and query the
 * remaining ones (A-B).  Returns true if the source set changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark everything to be queried, reported sources get unmarked */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2273
2274/* State          Msg type      New state                Actions
2275 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2276 *                                                       Send Q(G,X-A)
2277 *                                                       Send Q(G)
2278 */
/* TO_IN record received while in EXCLUDE mode (state table above):
 * refresh/add the reported sources with GMI timers, query the active
 * (timer-pending) sources not in the report (X-A) and send a
 * group-specific query.  Returns true if the source set changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only active sources (set X) are candidates for querying */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2325
/* Dispatch a TO_IN record based on the group's filter mode; may delete
 * the whole port group when EHT tracking says no hosts remain.
 * Returns true if the forwarding state changed (false after a delete,
 * since @pg is gone and a notification was already sent).
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2356
2357/* State          Msg type      New state                Actions
2358 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2359 *                                                       Delete (A-B)
2360 *                                                       Send Q(G,A*B)
2361 *                                                       Group Timer=GMI
2362 */
/* TO_EX record received while in INCLUDE mode (state table above):
 * keep and query the intersection (A*B), add new sources blocked
 * (timer 0), and delete the sources not in the report (A-B).
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* existing reported source: keep it and query it */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2399
2400/* State          Msg type      New state                Actions
2401 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
2402 *                                                       Delete (X-A)
2403 *                                                       Delete (Y-A)
2404 *                                                       Send Q(G,A-Y)
2405 *                                                       Group Timer=GMI
2406 */
/* TO_EX record received while in EXCLUDE mode (state table above):
 * new sources inherit the group timer, sources not in the report are
 * deleted, and active reported sources (A-Y) are queried.  Returns
 * true if the source set changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2452
/* Dispatch a TO_EX record based on the group's filter mode, then switch
 * the group to EXCLUDE and set the group timer to GMI.  Returns true if
 * the forwarding state changed.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2479
2480/* State          Msg type      New state                Actions
2481 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
2482 */
/* BLOCK record received while in INCLUDE mode (state table above):
 * query the known sources that appear in the report (A*B); the source
 * set itself is unchanged except via EHT handling.  Returns true if the
 * forwarding state changed.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2516
2517/* State          Msg type      New state                Actions
2518 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
2519 *                                                       Send Q(G,A-Y)
2520 */
/* BLOCK record received while in EXCLUDE mode (state table above):
 * add unknown reported sources with the group timer (A-X-Y) and query
 * the active reported sources (A-Y).  Returns true if the source set
 * changed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2561
/* Dispatch a BLOCK record based on the group's filter mode; delete the
 * port group when an INCLUDE group ends up with no sources or EHT says
 * no hosts remain.  Returns true if the forwarding state changed
 * (false after a delete, since @pg is gone and a notification was
 * already sent).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2593
2594static struct net_bridge_port_group *
2595br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2596		       struct net_bridge_port *p,
2597		       const unsigned char *src)
2598{
2599	struct net_bridge *br __maybe_unused = mp->br;
2600	struct net_bridge_port_group *pg;
2601
2602	for (pg = mlock_dereference(mp->ports, br);
2603	     pg;
2604	     pg = mlock_dereference(pg->next, br))
2605		if (br_port_group_equal(pg, p, src))
2606			return pg;
2607
2608	return NULL;
2609}
2610
/* Process an IGMPv3 membership report consisting of several group
 * records.
 *
 * Each valid record first goes through the v2-style join/leave path;
 * when the bridge runs IGMPv3 and the report arrived on a real port
 * (@pmctx != NULL), the record's source list is then applied to the
 * port group's source state under br->multicast_lock.
 *
 * Returns 0 on success, -EINVAL for a truncated/malformed packet, or
 * the error from the group add.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	/* in v2 mode only join/leave semantics apply, source lists are
	 * not processed
	 */
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* validate the fixed part of the group record first ... */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... then its source list, 4 bytes per IPv4 address */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* skip unknown record types */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* INCLUDE with an empty source list means leave;
			 * in v2 mode (or bridge-level context) handle it
			 * as a plain leave
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* source-list processing below is IGMPv3-only */
		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
2731
2732#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 listener report consisting of several multicast
 * address records.
 *
 * Each valid record first goes through the v1-style join/leave path;
 * when the bridge runs MLDv2 and the report arrived on a real port
 * (@pmctx != NULL), the record's source list is then applied to the
 * port group's source state under br->multicast_lock.
 *
 * Returns 0 on success, -EINVAL for a truncated/malformed packet, or
 * the error from the group add.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	/* in v1 mode only join/leave semantics apply, source lists are
	 * not processed
	 */
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* the source count field must lie within the transport
		 * payload before we read it
		 */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* record header plus nsrcs IPv6 source addresses */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* skip unknown record types */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* INCLUDE with an empty source list means leave;
			 * in v1 mode (or bridge-level context) handle it
			 * as a plain leave
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* source-list processing below is MLDv2-only */
		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
2873#endif
2874
/* Querier election: per the IGMP/MLD querier election rules the querier
 * with the numerically lowest source address wins.  Update the stored
 * querier when @saddr is equal to or lower than the current one, or
 * when no election is in progress (neither the own- nor the
 * other-querier timer is running).
 * Returns true when the stored querier was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* ifindex 0 == the query came from the bridge itself */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		/* no querier recorded yet (0.0.0.0) or @saddr wins/ties */
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* no election running - take the new querier */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}
2915
/* Resolve the recorded querier port ifindex to a port of bridge @br.
 * Returns NULL when no port was recorded (ifindex 0), when the ifindex
 * no longer resolves to a device, or when the device is not a port of
 * this bridge.  Caller must hold rcu or rtnl (see br_port_get_rtnl_rcu()).
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}
 
2936
2937size_t br_multicast_querier_state_size(void)
 
 
2938{
2939	return nla_total_size(0) +		/* nest attribute */
2940	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
2941	       nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
2942	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
2943#if IS_ENABLED(CONFIG_IPV6)
2944	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
2945	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
2946	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
2947#endif
2948	       0;
2949}
2950
/* Dump the current IPv4/IPv6 querier state into a @nest_attr netlink
 * nest.  Protected by rtnl or rcu.  Returns 0 (also when nothing is
 * dumped - the empty nest is cancelled) or -EMSGSIZE when the skb ran
 * out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	/* nothing to report with snooping disabled (globally or for
	 * this vlan context)
	 */
	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	/* IPv4 querier info is relevant only when we act as querier
	 * ourselves or another querier's timer is still running
	 */
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	/* same dance for the IPv6 querier */
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop the nest again if neither family added anything */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
3026
/* Restart the other-querier expiry timer after a query from another
 * querier was accepted.  delay_time is only primed when the timer was
 * not already running, i.e. at the start of a new election window.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
3037
/* Propagate the "multicast router reachable through this port" state to
 * switchdev drivers; the attribute set is deferred (SWITCHDEV_F_DEFER)
 * and its result is not checked.
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
3050
/* Map a router-list node back to its bridge port.  The node is embedded
 * in a different member (ip6_rlist vs ip4_rlist) depending on which
 * router list it came from.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
3069
3070static struct hlist_node *
3071br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3072			    struct net_bridge_port *port,
3073			    struct hlist_head *mc_router_list)
3074
3075{
3076	struct hlist_node *slot = NULL;
3077	struct net_bridge_port *p;
3078	struct hlist_node *rlist;
3079
3080	hlist_for_each(rlist, mc_router_list) {
3081		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3082
3083		if ((unsigned long)port >= (unsigned long)p)
3084			break;
3085
3086		slot = rlist;
3087	}
3088
3089	return slot;
3090}
3091
3092static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3093					   struct hlist_node *rnode)
3094{
3095#if IS_ENABLED(CONFIG_IPV6)
3096	if (rnode != &pmctx->ip6_rlist)
3097		return hlist_unhashed(&pmctx->ip6_rlist);
3098	else
3099		return hlist_unhashed(&pmctx->ip4_rlist);
3100#else
3101	return true;
3102#endif
3103}
3104
/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}
3135
/* Add port to the IPv4 router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}
3146
/* Add port to the IPv6 router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 *  (no-op when IPv6 is not built in)
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}
3159
/* Record that a multicast router was seen behind @pmctx (or behind the
 * bridge itself when @pmctx is NULL) and rearm the expiry @timer.
 * Ports whose router mode is disabled or permanent are left untouched.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		/* bridge-level: only the temporary-query router mode
		 * keeps a timed router state
		 */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
3188
3189static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3190					 struct net_bridge_mcast_port *pmctx)
3191{
3192	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3193	struct hlist_node *rlist = NULL;
3194
3195	if (pmctx) {
3196		timer = &pmctx->ip4_mc_router_timer;
3197		rlist = &pmctx->ip4_rlist;
3198	}
3199
3200	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3201				 &brmctx->ip4_mc_router_list);
3202}
3203
/* Mark the bridge itself (@pmctx == NULL) or the given port as having
 * an IPv6 multicast router behind it; compiled away without CONFIG_IPV6.
 */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}
3220
/* A general IGMP query was received from @saddr: run querier election
 * and, if the sender wins, restart the other-querier timer and mark the
 * originating port/bridge as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}
3234
3235#if IS_ENABLED(CONFIG_IPV6)
/* A general MLD query was received from @saddr: run querier election
 * and, if the sender wins, restart the other-querier timer and mark the
 * originating port/bridge as a multicast router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
3249#endif
3250
/* Process an IGMP query.  A general query feeds querier election; a
 * group-specific query shortens the expiry timers of the matching mdb
 * entry and its member ports so the group times out unless a report
 * refreshes it.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* v1 query (code 0): use a 10s default response
			 * time and treat it as a general query
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		/* IGMPv3 query; ignore queries with sources and, in v3
		 * mode, group-specific queries with suppress set
		 */
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query - feeds querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	/* group-specific query: lower the remaining lifetime of the
	 * group and its member ports to last_member_count * max_delay
	 */
	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3330
3331#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD query.  A general query feeds querier election; a
 * group-specific query shortens the expiry timers of the matching mdb
 * entry and its member ports.  Returns 0 or -EINVAL when the packet is
 * too short.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query; in v2 mode ignore group-specific queries
		 * with suppress set
		 */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query - feeds querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* group-specific query: lower the remaining lifetime of the
	 * group and its member ports to last_member_count * max_delay
	 */
	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
3420#endif
3421
/* Common handling for IGMP leave / MLD done messages.
 * With BR_MULTICAST_FAST_LEAVE set on the port, matching non-permanent
 * port groups are deleted immediately.  Otherwise, when no other
 * querier is active and we act as querier, a last-member query is sent
 * and the relevant timers are shortened so the membership expires
 * unless it is refreshed by a report.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		/* fast leave: drop matching non-permanent entries now */
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active - it handles the queries */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* shorten the timer of the leaving port's group entry */
		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* bridge-level leave: age out the host join */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* port-level leave: age out this port's group entry */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3523
/* Handle an IGMP leave for @group in vlan @vid from Ethernet source
 * @src; local multicast groups (see ipv4_is_local_multicast()) are
 * never aged out.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	/* NULL pmctx == the bridge itself is the leaving member */
	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	/* memset also clears padding; br_ip is used as a lookup key */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}
3547
3548#if IS_ENABLED(CONFIG_IPV6)
/* Handle an MLD done message for @group in vlan @vid from Ethernet
 * source @src; the link-local all-nodes group is never aged out.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	/* NULL pmctx == the bridge itself is the leaving member */
	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	/* memset also clears padding; br_ip is used as a lookup key */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
3572#endif
3573
3574static void br_multicast_err_count(const struct net_bridge *br,
3575				   const struct net_bridge_port *p,
3576				   __be16 proto)
3577{
3578	struct bridge_mcast_stats __percpu *stats;
3579	struct bridge_mcast_stats *pstats;
3580
3581	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3582		return;
3583
3584	if (p)
3585		stats = p->mcast_stats;
3586	else
3587		stats = br->mcast_stats;
3588	if (WARN_ON(!stats))
3589		return;
3590
3591	pstats = this_cpu_ptr(stats);
3592
3593	u64_stats_update_begin(&pstats->syncp);
3594	switch (proto) {
3595	case htons(ETH_P_IP):
3596		pstats->mstats.igmp_parse_errors++;
3597		break;
3598#if IS_ENABLED(CONFIG_IPV6)
3599	case htons(ETH_P_IPV6):
3600		pstats->mstats.mld_parse_errors++;
3601		break;
3602#endif
3603	}
3604	u64_stats_update_end(&pstats->syncp);
3605}
3606
3607static void br_multicast_pim(struct net_bridge_mcast *brmctx,
3608			     struct net_bridge_mcast_port *pmctx,
3609			     const struct sk_buff *skb)
3610{
3611	unsigned int offset = skb_transport_offset(skb);
3612	struct pimhdr *pimhdr, _pimhdr;
3613
3614	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3615	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3616	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3617		return;
3618
3619	spin_lock(&brmctx->br->multicast_lock);
3620	br_ip4_multicast_mark_router(brmctx, pmctx);
3621	spin_unlock(&brmctx->br->multicast_lock);
3622}
3623
3624static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3625				    struct net_bridge_mcast_port *pmctx,
3626				    struct sk_buff *skb)
3627{
3628	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3629	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3630		return -ENOMSG;
3631
3632	spin_lock(&brmctx->br->multicast_lock);
3633	br_ip4_multicast_mark_router(brmctx, pmctx);
3634	spin_unlock(&brmctx->br->multicast_lock);
3635
3636	return 0;
3637}
3638
/* Snoop an IPv4 packet that may carry IGMP (or PIM/MRD control
 * traffic): validate it, dispatch reports/queries/leaves, mark router
 * ports, set skb CB flags used by the forwarding path and update the
 * multicast statistics.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP - but some control destinations still carry
		 * router information (PIM hello, MRD advertisement)
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3694
3695#if IS_ENABLED(CONFIG_IPV6)
3696static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3697				     struct net_bridge_mcast_port *pmctx,
3698				     struct sk_buff *skb)
3699{
 
 
 
 
 
 
 
 
 
3700	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3701		return;
 
 
3702
3703	spin_lock(&brmctx->br->multicast_lock);
3704	br_ip6_multicast_mark_router(brmctx, pmctx);
3705	spin_unlock(&brmctx->br->multicast_lock);
3706}
3707
/* Snoop a received IPv6 multicast packet: dispatch MLD reports, queries
 * and reductions to the group database and learn router ports from MRD
 * advertisements.  Returns 0 when the packet is not MLD or was handled,
 * or a negative error when MLD validation failed.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	/* -ENOMSG: not MLD; -ENODATA: valid ICMPv6 but no MLD payload */
	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* membership reports are flooded to router ports only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	/* account the packet in the per-type RX multicast statistics */
	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3761#endif
3762
/* Multicast snooping entry point for a packet entering the bridge.
 * When per-vlan snooping is enabled, rewrites *brmctx/*pmctx to the
 * vlan's multicast contexts so the caller keeps using the right ones,
 * then dispatches by L3 protocol.  Returns 0 when snooping is disabled
 * or the packet is of no snooping interest.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* global per-vlan snooping state gates all processing */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
3809
/* Common own-query timer handler: send the next periodic general query
 * for a bridge/vlan multicast context and account startup queries.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	/* a vlan context may have been disabled while the timer was pending */
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3825
3826static void br_ip4_multicast_query_expired(struct timer_list *t)
3827{
3828	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3829						     ip4_own_query.timer);
3830
3831	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
3832				   &brmctx->ip4_querier);
3833}
3834
3835#if IS_ENABLED(CONFIG_IPV6)
3836static void br_ip6_multicast_query_expired(struct timer_list *t)
3837{
3838	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3839						     ip6_own_query.timer);
3840
3841	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
3842				   &brmctx->ip6_querier);
3843}
3844#endif
3845
3846static void br_multicast_gc_work(struct work_struct *work)
3847{
3848	struct net_bridge *br = container_of(work, struct net_bridge,
3849					     mcast_gc_work);
3850	HLIST_HEAD(deleted_head);
3851
3852	spin_lock_bh(&br->multicast_lock);
3853	hlist_move_list(&br->mcast_gc_list, &deleted_head);
3854	spin_unlock_bh(&br->multicast_lock);
3855
3856	br_multicast_gc(&deleted_head);
3857}
3858
/* Initialize a bridge/vlan multicast context with its default state.
 * @vlan is NULL for the bridge-wide context.  The interval values below
 * are the usual IGMP/MLD defaults (e.g. 125s query interval, 260s
 * membership interval).
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	/* startup queries are sent at 1/4 of the query interval */
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	/* querier seqcount is serialized by the bridge multicast lock */
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
3902
/* Tear down a multicast context: stop all of its timers synchronously. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
3907
/* One-time multicast initialization for a newly created bridge device. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	/* snooping is on by default; assume IPv6 addresses until known */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
3922
3923static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3924{
3925	struct in_device *in_dev = in_dev_get(br->dev);
3926
3927	if (!in_dev)
3928		return;
3929
3930	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3931	in_dev_put(in_dev);
3932}
3933
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* no-op when IPv6 is not built in */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
3947
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device.
 * Must not be called with the multicast lock held (may deliver packets).
 */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
3953
/* Leave the IPv4 all-snoopers group joined by
 * br_ip4_multicast_join_snoopers().
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	/* the bridge device is expected to still have IPv4 state here */
	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3964
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* no-op when IPv6 is not built in */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
3978
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device.
 * Counterpart of br_multicast_join_snoopers().
 */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
3984
3985static void __br_multicast_open_query(struct net_bridge *br,
3986				      struct bridge_mcast_own_query *query)
3987{
3988	query->startup_sent = 0;
3989
3990	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3991		return;
3992
3993	mod_timer(&query->timer, jiffies);
3994}
3995
/* Start the IPv4 (and, if built, IPv6) own-query timers of a context. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
4003
/* Start querying on the bridge: either on every enabled per-vlan context
 * (when vlan snooping is active) or on the single bridge-wide context.
 * Caller must hold RTNL.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				/* only bridge-level vlan entries with mcast
				 * enabled get their queries started
				 */
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}
4027
/* Synchronously stop all timers of a multicast context; del_timer_sync
 * waits for any running handler, so this is safe during teardown.
 */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
4039
/* Enable or disable multicast processing for one vlan entry, either a
 * bridge-level (master) vlan or a per-port vlan.  The enabled flag is
 * flipped under the multicast lock so timers and packet processing see a
 * consistent state.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* don't enable if the vlan is globally mcast-disabled */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
4084
/* Propagate a multicast toggle on a master vlan to the matching per-port
 * vlans on all bridge ports, and to the bridge entry itself if present.
 */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		/* the port may not be a member of this vlan */
		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}
4104
/* Switch the bridge between per-vlan and bridge-wide multicast snooping.
 * Per-vlan snooping requires vlan filtering to be enabled.  When the mode
 * changes, the non-vlan contexts are stopped/started accordingly and each
 * existing vlan is toggled to the new mode.
 * Returns 0 on success or -EINVAL (with extack set) on invalid config.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	/* move every vlan to the new snooping mode */
	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
4143
/* Toggle the global (admin) multicast state of a master vlan and
 * propagate it.  Returns true if the state actually changed.
 * Caller must hold RTNL.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4159
/* Stop querying on the bridge: mirror of br_multicast_open(), stopping
 * either all enabled per-vlan contexts or the bridge-wide one.
 * Caller must hold RTNL.
 */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}
4183
/* Final multicast cleanup when the bridge device is deleted: destroy all
 * mdb entries, drain the gc list, stop the context timers and wait for
 * pending RCU callbacks before the bridge memory can go away.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	/* the gc worker may still have queued frees; flush it */
	cancel_work_sync(&br->mcast_gc_work);

	/* wait for outstanding RCU callbacks to complete */
	rcu_barrier();
}
4202
/* Set the multicast router mode of a bridge/vlan context.  Only
 * DISABLED, PERM and TEMP_QUERY are valid at the bridge level; returns
 * -EINVAL for anything else.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* router state no longer depends on the timers */
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4232
/* Notify userspace when a port stopped being a multicast router port.
 * @deleted: whether the caller actually removed the port from a router
 * list; nothing is sent otherwise.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4256
/* Set the multicast router mode of a port/vlan context.  Setting the
 * current mode again only refreshes the TEMP router timers.  Returns 0
 * on success or -EINVAL for an unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timers involved */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4320
4321int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
4322{
4323	int err;
4324
4325	if (br_vlan_is_master(v))
4326		err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
4327	else
4328		err = br_multicast_set_port_router(&v->port_mcast_ctx,
4329						   mcast_router);
4330
4331	return err;
4332}
4333
/* (Re)start own queries for a context and enable the matching own-query
 * timers on every active port, honoring per-vlan snooping when the
 * context belongs to a vlan.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	/* skip contexts that don't match the current snooping mode */
	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			/* use the port's per-vlan context if it exists */
			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		/* @query identifies which address family is being started */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4383
/* Enable or disable multicast snooping on the bridge.  On enable, the
 * queries and the port contexts are (re)started; the all-snoopers group
 * membership is adjusted outside the lock (see comment below).
 * Returns 0 on success or the error from the switchdev notification.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* offload drivers may veto the change; -EOPNOTSUPP is tolerated */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4440
4441bool br_multicast_enabled(const struct net_device *dev)
4442{
4443	struct net_bridge *br = netdev_priv(dev);
4444
4445	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4446}
4447EXPORT_SYMBOL_GPL(br_multicast_enabled);
4448
4449bool br_multicast_router(const struct net_device *dev)
4450{
4451	struct net_bridge *br = netdev_priv(dev);
4452	bool is_router;
4453
4454	spin_lock_bh(&br->multicast_lock);
4455	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
4456	spin_unlock_bh(&br->multicast_lock);
4457	return is_router;
4458}
4459EXPORT_SYMBOL_GPL(br_multicast_router);
4460
/* Enable or disable acting as an IGMP/MLD querier for a context.  When
 * enabling, arm the other-querier-present delay (so we don't immediately
 * take over from a foreign querier) and start our own queries.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
4494
4495int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4496				  unsigned long val)
4497{
4498	/* Currently we support only version 2 and 3 */
4499	switch (val) {
4500	case 2:
4501	case 3:
4502		break;
4503	default:
4504		return -EINVAL;
4505	}
4506
4507	spin_lock_bh(&brmctx->br->multicast_lock);
4508	brmctx->multicast_igmp_version = val;
4509	spin_unlock_bh(&brmctx->br->multicast_lock);
4510
4511	return 0;
4512}
4513
4514#if IS_ENABLED(CONFIG_IPV6)
4515int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
4516				 unsigned long val)
4517{
4518	/* Currently we support version 1 and 2 */
4519	switch (val) {
4520	case 1:
4521	case 2:
4522		break;
4523	default:
4524		return -EINVAL;
4525	}
4526
4527	spin_lock_bh(&brmctx->br->multicast_lock);
4528	brmctx->multicast_mld_version = val;
4529	spin_unlock_bh(&brmctx->br->multicast_lock);
4530
4531	return 0;
4532}
4533#endif
4534
/* Set the general query interval (@val in clock_t units), clamping to
 * the enforced minimum to avoid query storms.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}
4550
/* Set the startup query interval (@val in clock_t units), clamping to
 * the enforced minimum to avoid query storms.
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}
4566
4567/**
4568 * br_multicast_list_adjacent - Returns snooped multicast addresses
4569 * @dev:	The bridge port adjacent to which to retrieve addresses
4570 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
4571 *
4572 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
4573 * snooping feature on all bridge ports of dev's bridge device, excluding
4574 * the addresses from dev itself.
4575 *
4576 * Returns the number of items added to br_ip_list.
4577 *
4578 * Notes:
4579 * - br_ip_list needs to be initialized by caller
4580 * - br_ip_list might contain duplicates in the end
4581 *   (needs to be taken care of by caller)
4582 * - br_ip_list needs to be freed by caller
4583 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip the port we were asked about (see kernel-doc above) */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				/* OOM: return what was collected so far */
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4623
4624/**
4625 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4626 * @dev: The bridge port providing the bridge on which to check for a querier
4627 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4628 *
4629 * Checks whether the given interface has a bridge on top and if so returns
4630 * true if a valid querier exists anywhere on the bridged link layer.
4631 * Otherwise returns false.
4632 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a minimal ethernet header carrying only the protocol,
	 * which is all br_multicast_querier_exists() needs
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4660
4661/**
4662 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
4663 * @dev: The bridge port adjacent to which to check for a querier
4664 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4665 *
4666 * Checks whether the given interface has a bridge on top and if so returns
4667 * true if a selected querier is behind one of the other ports of this
4668 * bridge. Otherwise returns false.
4669 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	/* a pending other-query timer means a foreign querier was seen;
	 * it only counts if it sits behind a port other than @dev
	 */
	switch (proto) {
	case ETH_P_IP:
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
4714
4715/**
4716 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
4717 * @dev: The bridge port adjacent to which to check for a multicast router
4718 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4719 *
4720 * Checks whether the given interface has a bridge on top and if so returns
4721 * true if a multicast router is behind one of the other ports of this
4722 * bridge. Otherwise returns false.
4723 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	/* scan the router-port lists for a port other than @dev's */
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
4772
4773static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
4774			       const struct sk_buff *skb, u8 type, u8 dir)
4775{
4776	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
4777	__be16 proto = skb->protocol;
4778	unsigned int t_len;
4779
4780	u64_stats_update_begin(&pstats->syncp);
4781	switch (proto) {
4782	case htons(ETH_P_IP):
4783		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
4784		switch (type) {
4785		case IGMP_HOST_MEMBERSHIP_REPORT:
4786			pstats->mstats.igmp_v1reports[dir]++;
4787			break;
4788		case IGMPV2_HOST_MEMBERSHIP_REPORT:
4789			pstats->mstats.igmp_v2reports[dir]++;
4790			break;
4791		case IGMPV3_HOST_MEMBERSHIP_REPORT:
4792			pstats->mstats.igmp_v3reports[dir]++;
4793			break;
4794		case IGMP_HOST_MEMBERSHIP_QUERY:
4795			if (t_len != sizeof(struct igmphdr)) {
4796				pstats->mstats.igmp_v3queries[dir]++;
4797			} else {
4798				unsigned int offset = skb_transport_offset(skb);
4799				struct igmphdr *ih, _ihdr;
4800
4801				ih = skb_header_pointer(skb, offset,
4802							sizeof(_ihdr), &_ihdr);
4803				if (!ih)
4804					break;
4805				if (!ih->code)
4806					pstats->mstats.igmp_v1queries[dir]++;
4807				else
4808					pstats->mstats.igmp_v2queries[dir]++;
4809			}
4810			break;
4811		case IGMP_HOST_LEAVE_MESSAGE:
4812			pstats->mstats.igmp_leaves[dir]++;
4813			break;
4814		}
4815		break;
4816#if IS_ENABLED(CONFIG_IPV6)
4817	case htons(ETH_P_IPV6):
4818		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
4819			sizeof(struct ipv6hdr);
4820		t_len -= skb_network_header_len(skb);
4821		switch (type) {
4822		case ICMPV6_MGM_REPORT:
4823			pstats->mstats.mld_v1reports[dir]++;
4824			break;
4825		case ICMPV6_MLD2_REPORT:
4826			pstats->mstats.mld_v2reports[dir]++;
4827			break;
4828		case ICMPV6_MGM_QUERY:
4829			if (t_len != sizeof(struct mld_msg))
4830				pstats->mstats.mld_v2queries[dir]++;
4831			else
4832				pstats->mstats.mld_v1queries[dir]++;
4833			break;
4834		case ICMPV6_MGM_REDUCTION:
4835			pstats->mstats.mld_leaves[dir]++;
4836			break;
4837		}
4838		break;
4839#endif /* CONFIG_IPV6 */
4840	}
4841	u64_stats_update_end(&pstats->syncp);
4842}
4843
4844void br_multicast_count(struct net_bridge *br,
4845			const struct net_bridge_port *p,
4846			const struct sk_buff *skb, u8 type, u8 dir)
4847{
4848	struct bridge_mcast_stats __percpu *stats;
4849
4850	/* if multicast_disabled is true then igmp type can't be set */
4851	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
4852		return;
4853
4854	if (p)
4855		stats = p->mcast_stats;
4856	else
4857		stats = br->mcast_stats;
4858	if (WARN_ON(!stats))
4859		return;
4860
4861	br_mcast_stats_add(stats, skb, type, dir);
4862}
4863
4864int br_multicast_init_stats(struct net_bridge *br)
4865{
4866	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4867	if (!br->mcast_stats)
4868		return -ENOMEM;
4869
4870	return 0;
4871}
4872
/* Free the bridge-global per-cpu multicast statistics allocated by
 * br_multicast_init_stats().
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
4877
/* Sum one RX/TX counter pair from a per-cpu snapshot into the totals.
 * Kept noinline_for_stack to bound the caller's stack usage when clang
 * would otherwise inline every call site:
 * https://bugs.llvm.org/show_bug.cgi?id=45802#c9
 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
4884
4885void br_multicast_get_stats(const struct net_bridge *br,
4886			    const struct net_bridge_port *p,
4887			    struct br_mcast_stats *dest)
4888{
4889	struct bridge_mcast_stats __percpu *stats;
4890	struct br_mcast_stats tdst;
4891	int i;
4892
4893	memset(dest, 0, sizeof(*dest));
4894	if (p)
4895		stats = p->mcast_stats;
4896	else
4897		stats = br->mcast_stats;
4898	if (WARN_ON(!stats))
4899		return;
4900
4901	memset(&tdst, 0, sizeof(tdst));
4902	for_each_possible_cpu(i) {
4903		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
4904		struct br_mcast_stats temp;
4905		unsigned int start;
4906
4907		do {
4908			start = u64_stats_fetch_begin(&cpu_stats->syncp);
4909			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
4910		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
4911
4912		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
4913		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
4914		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
4915		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
4916		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
4917		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
4918		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
4919		tdst.igmp_parse_errors += temp.igmp_parse_errors;
4920
4921		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
4922		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
4923		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
4924		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
4925		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
4926		tdst.mld_parse_errors += temp.mld_parse_errors;
4927	}
4928	memcpy(dest, &tdst, sizeof(*dest));
4929}
4930
4931int br_mdb_hash_init(struct net_bridge *br)
4932{
4933	int err;
4934
4935	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4936	if (err)
4937		return err;
4938
4939	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4940	if (err) {
4941		rhashtable_destroy(&br->sg_port_tbl);
4942		return err;
4943	}
4944
4945	return 0;
4946}
4947
/* Tear down the rhashtables set up by br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}