v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Multicast support for IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
  10 */
  11
  12/* Changes:
  13 *
  14 *	yoshfuji	: fix format of router-alert option
  15 *	YOSHIFUJI Hideaki @USAGI:
  16 *		Fixed source address for MLD message based on
  17 *		<draft-ietf-magma-mld-source-05.txt>.
  18 *	YOSHIFUJI Hideaki @USAGI:
  19 *		- Ignore Queries for invalid addresses.
  20 *		- MLD for link-local addresses.
  21 *	David L Stevens <dlstevens@us.ibm.com>:
  22 *		- MLDv2 support
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/errno.h>
  27#include <linux/types.h>
  28#include <linux/string.h>
  29#include <linux/socket.h>
  30#include <linux/sockios.h>
  31#include <linux/jiffies.h>
  32#include <linux/times.h>
  33#include <linux/net.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/if_arp.h>
  38#include <linux/route.h>
  39#include <linux/init.h>
  40#include <linux/proc_fs.h>
  41#include <linux/seq_file.h>
  42#include <linux/slab.h>
  43#include <linux/pkt_sched.h>
  44#include <net/mld.h>
  45
  46#include <linux/netfilter.h>
  47#include <linux/netfilter_ipv6.h>
  48
  49#include <net/net_namespace.h>
  50#include <net/sock.h>
  51#include <net/snmp.h>
  52
  53#include <net/ipv6.h>
  54#include <net/protocol.h>
  55#include <net/if_inet6.h>
  56#include <net/ndisc.h>
  57#include <net/addrconf.h>
  58#include <net/ip6_route.h>
  59#include <net/inet_common.h>
  60
  61#include <net/ip6_checksum.h>
  62
  63/* Ensure that we have struct in6_addr aligned on 32bit word. */
  64static int __mld2_query_bugs[] __attribute__((__unused__)) = {
  65	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
  66	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
  67	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
  68};
  69
  70static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
  71
  72static void igmp6_join_group(struct ifmcaddr6 *ma);
  73static void igmp6_leave_group(struct ifmcaddr6 *ma);
  74static void igmp6_timer_handler(struct timer_list *t);
  75
  76static void mld_gq_timer_expire(struct timer_list *t);
  77static void mld_ifc_timer_expire(struct timer_list *t);
  78static void mld_ifc_event(struct inet6_dev *idev);
  79static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
  80static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
  81static void mld_clear_delrec(struct inet6_dev *idev);
  82static bool mld_in_v1_mode(const struct inet6_dev *idev);
  83static int sf_setstate(struct ifmcaddr6 *pmc);
  84static void sf_markstate(struct ifmcaddr6 *pmc);
  85static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
  86static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  87			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
  88			  int delta);
  89static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  90			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
  91			  int delta);
  92static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
  93			    struct inet6_dev *idev);
  94static int __ipv6_dev_mc_inc(struct net_device *dev,
  95			     const struct in6_addr *addr, unsigned int mode);
  96
  97#define MLD_QRV_DEFAULT		2
  98/* RFC3810, 9.2. Query Interval */
  99#define MLD_QI_DEFAULT		(125 * HZ)
 100/* RFC3810, 9.3. Query Response Interval */
 101#define MLD_QRI_DEFAULT		(10 * HZ)
 102
 103/* RFC3810, 8.1 Query Version Distinctions */
 104#define MLD_V1_QUERY_LEN	24
 105#define MLD_V2_QUERY_LEN_MIN	28
 106
 107#define IPV6_MLD_MAX_MSF	64
 108
 109int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
 110int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
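/* For illustration only: both defaults above are exported as sysctls, so
 * (assuming the usual procfs paths for these knobs) they can be tuned at
 * runtime, e.g.:
 *
 *	echo 3  > /proc/sys/net/ipv6/mld_qrv	    (robustness variable)
 *	echo 64 > /proc/sys/net/ipv6/mld_max_msf    (per-socket source filters)
 */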
 111
 112/*
 113 *	socket join on multicast group
 114 */
 115
 116#define for_each_pmc_rcu(np, pmc)				\
 117	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
 118	     pmc != NULL;					\
 119	     pmc = rcu_dereference(pmc->next))
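/* Note: for_each_pmc_rcu() walks the per-socket ipv6_mc_list.  Callers in this
 * file hold rcu_read_lock(); the list itself is only modified under RTNL plus
 * the socket lock, and entries are freed via kfree_rcu() (see
 * ipv6_sock_mc_drop() below), so an RCU reader never sees a dangling entry.
 */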
 120
 121static int unsolicited_report_interval(struct inet6_dev *idev)
 122{
 123	int iv;
 124
 125	if (mld_in_v1_mode(idev))
 126		iv = idev->cnf.mldv1_unsolicited_report_interval;
 127	else
 128		iv = idev->cnf.mldv2_unsolicited_report_interval;
 129
 130	return iv > 0 ? iv : 1;
 131}
 132
 133static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
 134			       const struct in6_addr *addr, unsigned int mode)
 135{
 136	struct net_device *dev = NULL;
 137	struct ipv6_mc_socklist *mc_lst;
 138	struct ipv6_pinfo *np = inet6_sk(sk);
 139	struct net *net = sock_net(sk);
 140	int err;
 141
 142	ASSERT_RTNL();
 143
 144	if (!ipv6_addr_is_multicast(addr))
 145		return -EINVAL;
 146
 147	rcu_read_lock();
 148	for_each_pmc_rcu(np, mc_lst) {
 149		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 150		    ipv6_addr_equal(&mc_lst->addr, addr)) {
 151			rcu_read_unlock();
 152			return -EADDRINUSE;
 153		}
 154	}
 155	rcu_read_unlock();
 156
 157	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 158
 159	if (!mc_lst)
 160		return -ENOMEM;
 161
 162	mc_lst->next = NULL;
 163	mc_lst->addr = *addr;
 164
 165	if (ifindex == 0) {
 166		struct rt6_info *rt;
 167		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
 168		if (rt) {
 169			dev = rt->dst.dev;
 170			ip6_rt_put(rt);
 171		}
 172	} else
 173		dev = __dev_get_by_index(net, ifindex);
 174
 175	if (!dev) {
 176		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 177		return -ENODEV;
 178	}
 179
 180	mc_lst->ifindex = dev->ifindex;
 181	mc_lst->sfmode = mode;
 182	rwlock_init(&mc_lst->sflock);
 183	mc_lst->sflist = NULL;
 184
 185	/*
 186	 *	now add/increase the group membership on the device
 187	 */
 188
 189	err = __ipv6_dev_mc_inc(dev, addr, mode);
 190
 191	if (err) {
 192		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 193		return err;
 194	}
 195
 196	mc_lst->next = np->ipv6_mc_list;
 197	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
 198
 199	return 0;
 200}
 201
 202int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 203{
 204	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
 205}
 206EXPORT_SYMBOL(ipv6_sock_mc_join);
 207
 208int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
 209			  const struct in6_addr *addr, unsigned int mode)
 210{
 211	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
 212}
 213
 214/*
 215 *	socket leave on multicast group
 216 */
 217int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 218{
 219	struct ipv6_pinfo *np = inet6_sk(sk);
 220	struct ipv6_mc_socklist *mc_lst;
 221	struct ipv6_mc_socklist __rcu **lnk;
 222	struct net *net = sock_net(sk);
 223
 224	ASSERT_RTNL();
 225
 226	if (!ipv6_addr_is_multicast(addr))
 227		return -EINVAL;
 228
 229	for (lnk = &np->ipv6_mc_list;
 230	     (mc_lst = rtnl_dereference(*lnk)) != NULL;
 231	      lnk = &mc_lst->next) {
 232		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 233		    ipv6_addr_equal(&mc_lst->addr, addr)) {
 234			struct net_device *dev;
 235
 236			*lnk = mc_lst->next;
 237
 238			dev = __dev_get_by_index(net, mc_lst->ifindex);
 239			if (dev) {
 240				struct inet6_dev *idev = __in6_dev_get(dev);
 241
 242				(void) ip6_mc_leave_src(sk, mc_lst, idev);
 243				if (idev)
 244					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 245			} else
 246				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 247
 248			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
 249			kfree_rcu(mc_lst, rcu);
 250			return 0;
 251		}
 252	}
 253
 254	return -EADDRNOTAVAIL;
 255}
 256EXPORT_SYMBOL(ipv6_sock_mc_drop);
 257
 258/* called with rcu_read_lock() */
 259static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
 260					     const struct in6_addr *group,
 261					     int ifindex)
 262{
 263	struct net_device *dev = NULL;
 264	struct inet6_dev *idev = NULL;
 265
 266	if (ifindex == 0) {
 267		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
 268
 269		if (rt) {
 270			dev = rt->dst.dev;
 271			ip6_rt_put(rt);
 272		}
 273	} else
 274		dev = dev_get_by_index_rcu(net, ifindex);
 275
 276	if (!dev)
 277		return NULL;
 278	idev = __in6_dev_get(dev);
 279	if (!idev)
 280		return NULL;
 281	read_lock_bh(&idev->lock);
 282	if (idev->dead) {
 283		read_unlock_bh(&idev->lock);
 284		return NULL;
 285	}
 286	return idev;
 287}
 288
 289void __ipv6_sock_mc_close(struct sock *sk)
 290{
 291	struct ipv6_pinfo *np = inet6_sk(sk);
 292	struct ipv6_mc_socklist *mc_lst;
 293	struct net *net = sock_net(sk);
 294
 295	ASSERT_RTNL();
 296
 297	while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
 298		struct net_device *dev;
 299
 300		np->ipv6_mc_list = mc_lst->next;
 301
 302		dev = __dev_get_by_index(net, mc_lst->ifindex);
 303		if (dev) {
 304			struct inet6_dev *idev = __in6_dev_get(dev);
 305
 306			(void) ip6_mc_leave_src(sk, mc_lst, idev);
 307			if (idev)
 308				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 309		} else
 310			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 311
 312		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
 313		kfree_rcu(mc_lst, rcu);
 314	}
 315}
 316
 317void ipv6_sock_mc_close(struct sock *sk)
 318{
 319	struct ipv6_pinfo *np = inet6_sk(sk);
 320
 321	if (!rcu_access_pointer(np->ipv6_mc_list))
 322		return;
 323	rtnl_lock();
 324	__ipv6_sock_mc_close(sk);
 325	rtnl_unlock();
 326}
 327
 328int ip6_mc_source(int add, int omode, struct sock *sk,
 329	struct group_source_req *pgsr)
 330{
 331	struct in6_addr *source, *group;
 332	struct ipv6_mc_socklist *pmc;
 333	struct inet6_dev *idev;
 334	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 335	struct ip6_sf_socklist *psl;
 336	struct net *net = sock_net(sk);
 337	int i, j, rv;
 338	int leavegroup = 0;
 339	int pmclocked = 0;
 340	int err;
 341
 342	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
 343	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
 344
 345	if (!ipv6_addr_is_multicast(group))
 346		return -EINVAL;
 347
 348	rcu_read_lock();
 349	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
 350	if (!idev) {
 351		rcu_read_unlock();
 352		return -ENODEV;
 353	}
 354
 355	err = -EADDRNOTAVAIL;
 356
 357	for_each_pmc_rcu(inet6, pmc) {
 358		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 359			continue;
 360		if (ipv6_addr_equal(&pmc->addr, group))
 361			break;
 362	}
 363	if (!pmc) {		/* must have a prior join */
 364		err = -EINVAL;
 365		goto done;
 366	}
 367	/* if a source filter was set, must be the same mode as before */
 368	if (pmc->sflist) {
 369		if (pmc->sfmode != omode) {
 370			err = -EINVAL;
 371			goto done;
 372		}
 373	} else if (pmc->sfmode != omode) {
 374		/* allow mode switches for empty-set filters */
 375		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
 376		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 377		pmc->sfmode = omode;
 378	}
 379
 380	write_lock(&pmc->sflock);
 381	pmclocked = 1;
 382
 383	psl = pmc->sflist;
 384	if (!add) {
 385		if (!psl)
 386			goto done;	/* err = -EADDRNOTAVAIL */
 387		rv = !0;
 388		for (i = 0; i < psl->sl_count; i++) {
 389			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
 390			if (rv == 0)
 391				break;
 392		}
 393		if (rv)		/* source not found */
 394			goto done;	/* err = -EADDRNOTAVAIL */
 395
 396		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
 397		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
 398			leavegroup = 1;
 399			goto done;
 400		}
 401
 402		/* update the interface filter */
 403		ip6_mc_del_src(idev, group, omode, 1, source, 1);
 404
 405		for (j = i+1; j < psl->sl_count; j++)
 406			psl->sl_addr[j-1] = psl->sl_addr[j];
 407		psl->sl_count--;
 408		err = 0;
 409		goto done;
 410	}
 411	/* else, add a new source to the filter */
 412
 413	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
 414		err = -ENOBUFS;
 415		goto done;
 416	}
 417	if (!psl || psl->sl_count == psl->sl_max) {
 418		struct ip6_sf_socklist *newpsl;
 419		int count = IP6_SFBLOCK;
 420
 421		if (psl)
 422			count += psl->sl_max;
 423		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
 424		if (!newpsl) {
 425			err = -ENOBUFS;
 426			goto done;
 427		}
 428		newpsl->sl_max = count;
 429		newpsl->sl_count = count - IP6_SFBLOCK;
 430		if (psl) {
 431			for (i = 0; i < psl->sl_count; i++)
 432				newpsl->sl_addr[i] = psl->sl_addr[i];
 433			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
 434		}
 435		pmc->sflist = psl = newpsl;
 436	}
 437	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
 438	for (i = 0; i < psl->sl_count; i++) {
 439		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
  440		if (rv == 0) /* source already in the filter: error */
 441			goto done;
 442	}
 443	for (j = psl->sl_count-1; j >= i; j--)
 444		psl->sl_addr[j+1] = psl->sl_addr[j];
 445	psl->sl_addr[i] = *source;
 446	psl->sl_count++;
 447	err = 0;
 448	/* update the interface list */
 449	ip6_mc_add_src(idev, group, omode, 1, source, 1);
 450done:
 451	if (pmclocked)
 452		write_unlock(&pmc->sflock);
 453	read_unlock_bh(&idev->lock);
 454	rcu_read_unlock();
 455	if (leavegroup)
 456		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
 457	return err;
 458}
 459
 460int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
 461		    struct sockaddr_storage *list)
 462{
 463	const struct in6_addr *group;
 464	struct ipv6_mc_socklist *pmc;
 465	struct inet6_dev *idev;
 466	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 467	struct ip6_sf_socklist *newpsl, *psl;
 468	struct net *net = sock_net(sk);
 469	int leavegroup = 0;
 470	int i, err;
 471
 472	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 473
 474	if (!ipv6_addr_is_multicast(group))
 475		return -EINVAL;
 476	if (gsf->gf_fmode != MCAST_INCLUDE &&
 477	    gsf->gf_fmode != MCAST_EXCLUDE)
 478		return -EINVAL;
 479
 480	rcu_read_lock();
 481	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 482
 483	if (!idev) {
 484		rcu_read_unlock();
 485		return -ENODEV;
 486	}
 487
 488	err = 0;
 489
 490	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 491		leavegroup = 1;
 492		goto done;
 493	}
 494
 495	for_each_pmc_rcu(inet6, pmc) {
 496		if (pmc->ifindex != gsf->gf_interface)
 497			continue;
 498		if (ipv6_addr_equal(&pmc->addr, group))
 499			break;
 500	}
 501	if (!pmc) {		/* must have a prior join */
 502		err = -EINVAL;
 503		goto done;
 504	}
 505	if (gsf->gf_numsrc) {
 506		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
 507							  GFP_ATOMIC);
 508		if (!newpsl) {
 509			err = -ENOBUFS;
 510			goto done;
 511		}
 512		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
 513		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
 514			struct sockaddr_in6 *psin6;
 515
 516			psin6 = (struct sockaddr_in6 *)list;
 517			newpsl->sl_addr[i] = psin6->sin6_addr;
 518		}
 519		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
 520			newpsl->sl_count, newpsl->sl_addr, 0);
 521		if (err) {
 522			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
 523			goto done;
 524		}
 525	} else {
 526		newpsl = NULL;
 527		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
 528	}
 529
 530	write_lock(&pmc->sflock);
 531	psl = pmc->sflist;
 532	if (psl) {
 533		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
 534			psl->sl_count, psl->sl_addr, 0);
 535		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
 536	} else
 537		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 538	pmc->sflist = newpsl;
 539	pmc->sfmode = gsf->gf_fmode;
 540	write_unlock(&pmc->sflock);
 541	err = 0;
 542done:
 543	read_unlock_bh(&idev->lock);
 544	rcu_read_unlock();
 545	if (leavegroup)
 546		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 547	return err;
 548}
 549
 550int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 551	struct sockaddr_storage *p)
 552{
 553	int err, i, count, copycount;
 554	const struct in6_addr *group;
 555	struct ipv6_mc_socklist *pmc;
 556	struct inet6_dev *idev;
 557	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 558	struct ip6_sf_socklist *psl;
 559	struct net *net = sock_net(sk);
 560
 561	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 562
 563	if (!ipv6_addr_is_multicast(group))
 564		return -EINVAL;
 565
 566	rcu_read_lock();
 567	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 568
 569	if (!idev) {
 570		rcu_read_unlock();
 571		return -ENODEV;
 572	}
 573
 574	err = -EADDRNOTAVAIL;
 575	/* changes to the ipv6_mc_list require the socket lock and
 576	 * rtnl lock. We have the socket lock and rcu read lock,
 577	 * so reading the list is safe.
 578	 */
 579
 580	for_each_pmc_rcu(inet6, pmc) {
 581		if (pmc->ifindex != gsf->gf_interface)
 582			continue;
 583		if (ipv6_addr_equal(group, &pmc->addr))
 584			break;
 585	}
 586	if (!pmc)		/* must have a prior join */
 587		goto done;
 588	gsf->gf_fmode = pmc->sfmode;
 589	psl = pmc->sflist;
 590	count = psl ? psl->sl_count : 0;
 591	read_unlock_bh(&idev->lock);
 592	rcu_read_unlock();
 593
 594	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
 595	gsf->gf_numsrc = count;
 596	/* changes to psl require the socket lock, and a write lock
 597	 * on pmc->sflock. We have the socket lock so reading here is safe.
 598	 */
 599	for (i = 0; i < copycount; i++, p++) {
 600		struct sockaddr_in6 *psin6;
 601		struct sockaddr_storage ss;
 602
 603		psin6 = (struct sockaddr_in6 *)&ss;
 604		memset(&ss, 0, sizeof(ss));
 605		psin6->sin6_family = AF_INET6;
 606		psin6->sin6_addr = psl->sl_addr[i];
 607		if (copy_to_user(p, &ss, sizeof(ss)))
 608			return -EFAULT;
 609	}
 610	return 0;
 611done:
 612	read_unlock_bh(&idev->lock);
 613	rcu_read_unlock();
 614	return err;
 615}
 616
 617bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
 618		    const struct in6_addr *src_addr)
 619{
 620	struct ipv6_pinfo *np = inet6_sk(sk);
 621	struct ipv6_mc_socklist *mc;
 622	struct ip6_sf_socklist *psl;
 623	bool rv = true;
 624
 625	rcu_read_lock();
 626	for_each_pmc_rcu(np, mc) {
 627		if (ipv6_addr_equal(&mc->addr, mc_addr))
 628			break;
 629	}
 630	if (!mc) {
 631		rcu_read_unlock();
 632		return np->mc_all;
 633	}
 634	read_lock(&mc->sflock);
 635	psl = mc->sflist;
 636	if (!psl) {
 637		rv = mc->sfmode == MCAST_EXCLUDE;
 638	} else {
 639		int i;
 640
 641		for (i = 0; i < psl->sl_count; i++) {
 642			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
 643				break;
 644		}
 645		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
 646			rv = false;
 647		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
 648			rv = false;
 649	}
 650	read_unlock(&mc->sflock);
 651	rcu_read_unlock();
 652
 653	return rv;
 654}
 655
 656static void igmp6_group_added(struct ifmcaddr6 *mc)
 657{
 658	struct net_device *dev = mc->idev->dev;
 659	char buf[MAX_ADDR_LEN];
 660
 661	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
 662	    IPV6_ADDR_SCOPE_LINKLOCAL)
 663		return;
 664
 665	spin_lock_bh(&mc->mca_lock);
 666	if (!(mc->mca_flags&MAF_LOADED)) {
 667		mc->mca_flags |= MAF_LOADED;
 668		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 669			dev_mc_add(dev, buf);
 670	}
 671	spin_unlock_bh(&mc->mca_lock);
 672
 673	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
 674		return;
 675
 676	if (mld_in_v1_mode(mc->idev)) {
 677		igmp6_join_group(mc);
 678		return;
 679	}
 680	/* else v2 */
 681
 682	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
  683	 * should not send a filter-mode change record, as the mode
  684	 * change is from IN() to IN(A).
 685	 */
 686	if (mc->mca_sfmode == MCAST_EXCLUDE)
 687		mc->mca_crcount = mc->idev->mc_qrv;
 688
 689	mld_ifc_event(mc->idev);
 690}
 691
 692static void igmp6_group_dropped(struct ifmcaddr6 *mc)
 693{
 694	struct net_device *dev = mc->idev->dev;
 695	char buf[MAX_ADDR_LEN];
 696
 697	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
 698	    IPV6_ADDR_SCOPE_LINKLOCAL)
 699		return;
 700
 701	spin_lock_bh(&mc->mca_lock);
 702	if (mc->mca_flags&MAF_LOADED) {
 703		mc->mca_flags &= ~MAF_LOADED;
 704		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 705			dev_mc_del(dev, buf);
 706	}
 707
 708	spin_unlock_bh(&mc->mca_lock);
 709	if (mc->mca_flags & MAF_NOREPORT)
 710		return;
 711
 712	if (!mc->idev->dead)
 713		igmp6_leave_group(mc);
 714
 715	spin_lock_bh(&mc->mca_lock);
 716	if (del_timer(&mc->mca_timer))
 717		refcount_dec(&mc->mca_refcnt);
 718	spin_unlock_bh(&mc->mca_lock);
 719}
 720
 721/*
 722 * deleted ifmcaddr6 manipulation
 723 */
 724static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 725{
 726	struct ifmcaddr6 *pmc;
 727
 728	/* this is an "ifmcaddr6" for convenience; only the fields below
 729	 * are actually used. In particular, the refcnt and users are not
 730	 * used for management of the delete list. Using the same structure
 731	 * for deleted items allows change reports to use common code with
 732	 * non-deleted or query-response MCA's.
 733	 */
 734	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
 735	if (!pmc)
 736		return;
 737
 738	spin_lock_bh(&im->mca_lock);
 739	spin_lock_init(&pmc->mca_lock);
 740	pmc->idev = im->idev;
 741	in6_dev_hold(idev);
 742	pmc->mca_addr = im->mca_addr;
 743	pmc->mca_crcount = idev->mc_qrv;
 744	pmc->mca_sfmode = im->mca_sfmode;
 745	if (pmc->mca_sfmode == MCAST_INCLUDE) {
 746		struct ip6_sf_list *psf;
 747
 748		pmc->mca_tomb = im->mca_tomb;
 749		pmc->mca_sources = im->mca_sources;
 750		im->mca_tomb = im->mca_sources = NULL;
 751		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
 752			psf->sf_crcount = pmc->mca_crcount;
 753	}
 754	spin_unlock_bh(&im->mca_lock);
 755
 756	spin_lock_bh(&idev->mc_lock);
 757	pmc->next = idev->mc_tomb;
 758	idev->mc_tomb = pmc;
 759	spin_unlock_bh(&idev->mc_lock);
 760}
 761
 762static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 763{
 764	struct ifmcaddr6 *pmc, *pmc_prev;
 765	struct ip6_sf_list *psf;
 766	struct in6_addr *pmca = &im->mca_addr;
 767
 768	spin_lock_bh(&idev->mc_lock);
 769	pmc_prev = NULL;
 770	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
 771		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
 772			break;
 773		pmc_prev = pmc;
 774	}
 775	if (pmc) {
 776		if (pmc_prev)
 777			pmc_prev->next = pmc->next;
 778		else
 779			idev->mc_tomb = pmc->next;
 780	}
 781	spin_unlock_bh(&idev->mc_lock);
 782
 783	spin_lock_bh(&im->mca_lock);
 784	if (pmc) {
 785		im->idev = pmc->idev;
 786		if (im->mca_sfmode == MCAST_INCLUDE) {
 787			swap(im->mca_tomb, pmc->mca_tomb);
 788			swap(im->mca_sources, pmc->mca_sources);
 789			for (psf = im->mca_sources; psf; psf = psf->sf_next)
 790				psf->sf_crcount = idev->mc_qrv;
 791		} else {
 792			im->mca_crcount = idev->mc_qrv;
 793		}
 794		in6_dev_put(pmc->idev);
 795		ip6_mc_clear_src(pmc);
 796		kfree(pmc);
 797	}
 798	spin_unlock_bh(&im->mca_lock);
 799}
 800
 801static void mld_clear_delrec(struct inet6_dev *idev)
 802{
 803	struct ifmcaddr6 *pmc, *nextpmc;
 804
 805	spin_lock_bh(&idev->mc_lock);
 806	pmc = idev->mc_tomb;
 807	idev->mc_tomb = NULL;
 808	spin_unlock_bh(&idev->mc_lock);
 809
 810	for (; pmc; pmc = nextpmc) {
 811		nextpmc = pmc->next;
 812		ip6_mc_clear_src(pmc);
 813		in6_dev_put(pmc->idev);
 814		kfree(pmc);
 815	}
 816
 817	/* clear dead sources, too */
 818	read_lock_bh(&idev->lock);
 819	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
 820		struct ip6_sf_list *psf, *psf_next;
 821
 822		spin_lock_bh(&pmc->mca_lock);
 823		psf = pmc->mca_tomb;
 824		pmc->mca_tomb = NULL;
 825		spin_unlock_bh(&pmc->mca_lock);
 826		for (; psf; psf = psf_next) {
 827			psf_next = psf->sf_next;
 828			kfree(psf);
 829		}
 830	}
 831	read_unlock_bh(&idev->lock);
 832}
 833
 834static void mca_get(struct ifmcaddr6 *mc)
 835{
 836	refcount_inc(&mc->mca_refcnt);
 837}
 838
 839static void ma_put(struct ifmcaddr6 *mc)
 840{
 841	if (refcount_dec_and_test(&mc->mca_refcnt)) {
 842		in6_dev_put(mc->idev);
 843		kfree(mc);
 844	}
 845}
 846
 847static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 848				   const struct in6_addr *addr,
 849				   unsigned int mode)
 850{
 851	struct ifmcaddr6 *mc;
 852
 853	mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
 854	if (!mc)
 855		return NULL;
 856
 857	timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
 858
 859	mc->mca_addr = *addr;
 860	mc->idev = idev; /* reference taken by caller */
 861	mc->mca_users = 1;
 862	/* mca_stamp should be updated upon changes */
 863	mc->mca_cstamp = mc->mca_tstamp = jiffies;
 864	refcount_set(&mc->mca_refcnt, 1);
 865	spin_lock_init(&mc->mca_lock);
 866
 867	mc->mca_sfmode = mode;
 868	mc->mca_sfcount[mode] = 1;
 869
 870	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
 871	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
 872		mc->mca_flags |= MAF_NOREPORT;
 873
 874	return mc;
 875}
 876
 877/*
 878 *	device multicast group inc (add if not found)
 879 */
 880static int __ipv6_dev_mc_inc(struct net_device *dev,
 881			     const struct in6_addr *addr, unsigned int mode)
 882{
 883	struct ifmcaddr6 *mc;
 884	struct inet6_dev *idev;
 885
 886	ASSERT_RTNL();
 887
 888	/* we need to take a reference on idev */
 889	idev = in6_dev_get(dev);
 890
 891	if (!idev)
 892		return -EINVAL;
 893
 894	write_lock_bh(&idev->lock);
 895	if (idev->dead) {
 896		write_unlock_bh(&idev->lock);
 897		in6_dev_put(idev);
 898		return -ENODEV;
 899	}
 900
 901	for (mc = idev->mc_list; mc; mc = mc->next) {
 902		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
 903			mc->mca_users++;
 904			write_unlock_bh(&idev->lock);
 905			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
 906			in6_dev_put(idev);
 907			return 0;
 908		}
 909	}
 910
 911	mc = mca_alloc(idev, addr, mode);
 912	if (!mc) {
 913		write_unlock_bh(&idev->lock);
 914		in6_dev_put(idev);
 915		return -ENOMEM;
 916	}
 917
 918	mc->next = idev->mc_list;
 919	idev->mc_list = mc;
 920
 921	/* Hold this for the code below before we unlock,
 922	 * it is already exposed via idev->mc_list.
 923	 */
 924	mca_get(mc);
 925	write_unlock_bh(&idev->lock);
 926
 927	mld_del_delrec(idev, mc);
 928	igmp6_group_added(mc);
 929	ma_put(mc);
 930	return 0;
 931}
 932
 933int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 934{
 935	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
 936}
 937EXPORT_SYMBOL(ipv6_dev_mc_inc);
 938
 939/*
 940 *	device multicast group del
 941 */
 942int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 943{
 944	struct ifmcaddr6 *ma, **map;
 945
 946	ASSERT_RTNL();
 947
 948	write_lock_bh(&idev->lock);
 949	for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
 950		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
 951			if (--ma->mca_users == 0) {
 952				*map = ma->next;
 953				write_unlock_bh(&idev->lock);
 954
 955				igmp6_group_dropped(ma);
 956				ip6_mc_clear_src(ma);
 957
 958				ma_put(ma);
 959				return 0;
 960			}
 961			write_unlock_bh(&idev->lock);
 962			return 0;
 963		}
 964	}
 965	write_unlock_bh(&idev->lock);
 966
 967	return -ENOENT;
 968}
 969
 970int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
 971{
 972	struct inet6_dev *idev;
 973	int err;
 974
 975	ASSERT_RTNL();
 976
 977	idev = __in6_dev_get(dev);
 978	if (!idev)
 979		err = -ENODEV;
 980	else
 981		err = __ipv6_dev_mc_dec(idev, addr);
 982
 983	return err;
 984}
 985EXPORT_SYMBOL(ipv6_dev_mc_dec);
 986
 987/*
 988 *	check if the interface/address pair is valid
 989 */
 990bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
 991			 const struct in6_addr *src_addr)
 992{
 993	struct inet6_dev *idev;
 994	struct ifmcaddr6 *mc;
 995	bool rv = false;
 996
 997	rcu_read_lock();
 998	idev = __in6_dev_get(dev);
 999	if (idev) {
1000		read_lock_bh(&idev->lock);
1001		for (mc = idev->mc_list; mc; mc = mc->next) {
1002			if (ipv6_addr_equal(&mc->mca_addr, group))
1003				break;
1004		}
1005		if (mc) {
1006			if (src_addr && !ipv6_addr_any(src_addr)) {
1007				struct ip6_sf_list *psf;
1008
1009				spin_lock_bh(&mc->mca_lock);
1010				for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
1011					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1012						break;
1013				}
1014				if (psf)
1015					rv = psf->sf_count[MCAST_INCLUDE] ||
1016						psf->sf_count[MCAST_EXCLUDE] !=
1017						mc->mca_sfcount[MCAST_EXCLUDE];
1018				else
1019					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1020				spin_unlock_bh(&mc->mca_lock);
1021			} else
1022				rv = true; /* don't filter unspecified source */
1023		}
1024		read_unlock_bh(&idev->lock);
1025	}
1026	rcu_read_unlock();
1027	return rv;
1028}
1029
1030static void mld_gq_start_timer(struct inet6_dev *idev)
1031{
1032	unsigned long tv = prandom_u32() % idev->mc_maxdelay;
1033
1034	idev->mc_gq_running = 1;
1035	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1036		in6_dev_hold(idev);
1037}
1038
1039static void mld_gq_stop_timer(struct inet6_dev *idev)
1040{
1041	idev->mc_gq_running = 0;
1042	if (del_timer(&idev->mc_gq_timer))
1043		__in6_dev_put(idev);
1044}
1045
1046static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1047{
1048	unsigned long tv = prandom_u32() % delay;
1049
1050	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1051		in6_dev_hold(idev);
1052}
1053
1054static void mld_ifc_stop_timer(struct inet6_dev *idev)
1055{
1056	idev->mc_ifc_count = 0;
1057	if (del_timer(&idev->mc_ifc_timer))
1058		__in6_dev_put(idev);
1059}
1060
1061static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1062{
1063	unsigned long tv = prandom_u32() % delay;
1064
1065	if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1066		in6_dev_hold(idev);
1067}
1068
1069static void mld_dad_stop_timer(struct inet6_dev *idev)
1070{
1071	if (del_timer(&idev->mc_dad_timer))
1072		__in6_dev_put(idev);
1073}
1074
1075/*
 1076 *	IGMP handling (i.e. multicast ICMPv6 messages)
1077 */
1078
1079static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1080{
1081	unsigned long delay = resptime;
1082
1083	/* Do not start timer for these addresses */
1084	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1085	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1086		return;
1087
1088	if (del_timer(&ma->mca_timer)) {
1089		refcount_dec(&ma->mca_refcnt);
1090		delay = ma->mca_timer.expires - jiffies;
1091	}
1092
1093	if (delay >= resptime)
1094		delay = prandom_u32() % resptime;
1095
1096	ma->mca_timer.expires = jiffies + delay;
1097	if (!mod_timer(&ma->mca_timer, jiffies + delay))
1098		refcount_inc(&ma->mca_refcnt);
1099	ma->mca_flags |= MAF_TIMER_RUNNING;
1100}
1101
1102/* mark EXCLUDE-mode sources */
1103static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1104			     const struct in6_addr *srcs)
1105{
1106	struct ip6_sf_list *psf;
1107	int i, scount;
1108
1109	scount = 0;
1110	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1111		if (scount == nsrcs)
1112			break;
1113		for (i = 0; i < nsrcs; i++) {
1114			/* skip inactive filters */
1115			if (psf->sf_count[MCAST_INCLUDE] ||
1116			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
1117			    psf->sf_count[MCAST_EXCLUDE])
1118				break;
1119			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1120				scount++;
1121				break;
1122			}
1123		}
1124	}
1125	pmc->mca_flags &= ~MAF_GSQUERY;
1126	if (scount == nsrcs)	/* all sources excluded */
1127		return false;
1128	return true;
1129}
1130
1131static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1132			    const struct in6_addr *srcs)
1133{
1134	struct ip6_sf_list *psf;
1135	int i, scount;
1136
1137	if (pmc->mca_sfmode == MCAST_EXCLUDE)
1138		return mld_xmarksources(pmc, nsrcs, srcs);
1139
1140	/* mark INCLUDE-mode sources */
1141
1142	scount = 0;
1143	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1144		if (scount == nsrcs)
1145			break;
1146		for (i = 0; i < nsrcs; i++) {
1147			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1148				psf->sf_gsresp = 1;
1149				scount++;
1150				break;
1151			}
1152		}
1153	}
1154	if (!scount) {
1155		pmc->mca_flags &= ~MAF_GSQUERY;
1156		return false;
1157	}
1158	pmc->mca_flags |= MAF_GSQUERY;
1159	return true;
1160}
1161
1162static int mld_force_mld_version(const struct inet6_dev *idev)
1163{
 1164	/* Normally, both are 0 here. If enforcement of a particular version
 1165	 * is being used, per-device enforcement has lower precedence
 1166	 * than the 'all' device (.../conf/all/force_mld_version).
1167	 */
1168
1169	if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1170		return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1171	else
1172		return idev->cnf.force_mld_version;
1173}
1174
1175static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1176{
1177	return mld_force_mld_version(idev) == 2;
1178}
1179
1180static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1181{
1182	return mld_force_mld_version(idev) == 1;
1183}
1184
1185static bool mld_in_v1_mode(const struct inet6_dev *idev)
1186{
1187	if (mld_in_v2_mode_only(idev))
1188		return false;
1189	if (mld_in_v1_mode_only(idev))
1190		return true;
1191	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1192		return true;
1193
1194	return false;
1195}
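/* In short: a forced MLDv2 setting wins over everything, then a forced MLDv1
 * setting, and otherwise MLDv1 compatibility mode is active only while the
 * mc_v1_seen deadline set by mld_set_v1_mode() below has not yet expired.
 */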
1196
1197static void mld_set_v1_mode(struct inet6_dev *idev)
1198{
1199	/* RFC3810, relevant sections:
1200	 *  - 9.1. Robustness Variable
1201	 *  - 9.2. Query Interval
1202	 *  - 9.3. Query Response Interval
1203	 *  - 9.12. Older Version Querier Present Timeout
1204	 */
1205	unsigned long switchback;
1206
1207	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1208
1209	idev->mc_v1_seen = jiffies + switchback;
1210}
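/* Worked example, assuming the RFC 3810 defaults defined above: with
 * QRV = 2 (MLD_QRV_DEFAULT), QI = 125s (MLD_QI_DEFAULT) and QRI = 10s
 * (MLD_QRI_DEFAULT), the Older Version Querier Present Timeout is
 * 2 * 125s + 10s = 260s, i.e. we keep operating in MLDv1 mode for 260
 * seconds after the last MLDv1 query was seen.
 */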
1211
1212static void mld_update_qrv(struct inet6_dev *idev,
1213			   const struct mld2_query *mlh2)
1214{
1215	/* RFC3810, relevant sections:
1216	 *  - 5.1.8. QRV (Querier's Robustness Variable)
1217	 *  - 9.1. Robustness Variable
1218	 */
1219
1220	/* The value of the Robustness Variable MUST NOT be zero,
1221	 * and SHOULD NOT be one. Catch this here if we ever run
1222	 * into such a case in future.
1223	 */
1224	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1225	WARN_ON(idev->mc_qrv == 0);
1226
1227	if (mlh2->mld2q_qrv > 0)
1228		idev->mc_qrv = mlh2->mld2q_qrv;
1229
1230	if (unlikely(idev->mc_qrv < min_qrv)) {
1231		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1232				     idev->mc_qrv, min_qrv);
1233		idev->mc_qrv = min_qrv;
1234	}
1235}
1236
1237static void mld_update_qi(struct inet6_dev *idev,
1238			  const struct mld2_query *mlh2)
1239{
1240	/* RFC3810, relevant sections:
1241	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
1242	 *  - 9.2. Query Interval
1243	 *  - 9.12. Older Version Querier Present Timeout
1244	 *    (the [Query Interval] in the last Query received)
1245	 */
1246	unsigned long mc_qqi;
1247
1248	if (mlh2->mld2q_qqic < 128) {
1249		mc_qqi = mlh2->mld2q_qqic;
1250	} else {
1251		unsigned long mc_man, mc_exp;
1252
1253		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1254		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1255
1256		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1257	}
1258
1259	idev->mc_qi = mc_qqi * HZ;
1260}
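/* Worked example of the exponential form above: QQIC = 0x9e has the high bit
 * set, so exp (bits 6..4) = 1 and man (bits 3..0) = 14, giving
 * (14 | 0x10) << (1 + 3) = 30 << 4 = 480, i.e. a Query Interval of 480
 * seconds (RFC 3810, 5.1.9).
 */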
1261
1262static void mld_update_qri(struct inet6_dev *idev,
1263			   const struct mld2_query *mlh2)
1264{
1265	/* RFC3810, relevant sections:
1266	 *  - 5.1.3. Maximum Response Code
1267	 *  - 9.3. Query Response Interval
1268	 */
1269	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1270}
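/* The Maximum Response Code is decoded similarly by mldv2_mrc() from
 * <net/mld.h> (included above): values below 32768 are taken as-is, larger
 * ones use the floating-point form (man | 0x1000) << (exp + 3), all in
 * milliseconds (RFC 3810, 5.1.3), before msecs_to_jiffies() converts it.
 */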
1271
1272static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1273			  unsigned long *max_delay, bool v1_query)
1274{
1275	unsigned long mldv1_md;
1276
1277	/* Ignore v1 queries */
1278	if (mld_in_v2_mode_only(idev))
1279		return -EINVAL;
1280
1281	mldv1_md = ntohs(mld->mld_maxdelay);
1282
 1283	/* When we are in MLDv1 fallback and an MLDv2 router starts up
 1284	 * unaware of the current MLDv1 operation, the MRC == MRD mapping
1285	 * only works when the exponential algorithm is not being
1286	 * used (as MLDv1 is unaware of such things).
1287	 *
1288	 * According to the RFC author, the MLDv2 implementations
1289	 * he's aware of all use a MRC < 32768 on start up queries.
1290	 *
1291	 * Thus, should we *ever* encounter something else larger
1292	 * than that, just assume the maximum possible within our
1293	 * reach.
1294	 */
1295	if (!v1_query)
1296		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1297
1298	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1299
1300	/* MLDv1 router present: we need to go into v1 mode *only*
1301	 * when an MLDv1 query is received as per section 9.12. of
1302	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1303	 * queries MUST be of exactly 24 octets.
1304	 */
1305	if (v1_query)
1306		mld_set_v1_mode(idev);
1307
1308	/* cancel MLDv2 report timer */
1309	mld_gq_stop_timer(idev);
1310	/* cancel the interface change timer */
1311	mld_ifc_stop_timer(idev);
1312	/* clear deleted report items */
1313	mld_clear_delrec(idev);
1314
1315	return 0;
1316}
1317
1318static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1319			  unsigned long *max_delay)
1320{
1321	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1322
1323	mld_update_qrv(idev, mld);
1324	mld_update_qi(idev, mld);
1325	mld_update_qri(idev, mld);
1326
1327	idev->mc_maxdelay = *max_delay;
1328
1329	return 0;
1330}
1331
1332/* called with rcu_read_lock() */
1333int igmp6_event_query(struct sk_buff *skb)
1334{
1335	struct mld2_query *mlh2 = NULL;
1336	struct ifmcaddr6 *ma;
1337	const struct in6_addr *group;
1338	unsigned long max_delay;
1339	struct inet6_dev *idev;
1340	struct mld_msg *mld;
1341	int group_type;
1342	int mark = 0;
1343	int len, err;
1344
1345	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1346		return -EINVAL;
1347
1348	/* compute payload length excluding extension headers */
1349	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1350	len -= skb_network_header_len(skb);
1351
1352	/* RFC3810 6.2
1353	 * Upon reception of an MLD message that contains a Query, the node
1354	 * checks if the source address of the message is a valid link-local
1355	 * address, if the Hop Limit is set to 1, and if the Router Alert
1356	 * option is present in the Hop-By-Hop Options header of the IPv6
1357	 * packet.  If any of these checks fails, the packet is dropped.
1358	 */
1359	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1360	    ipv6_hdr(skb)->hop_limit != 1 ||
1361	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1362	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1363		return -EINVAL;
1364
1365	idev = __in6_dev_get(skb->dev);
1366	if (!idev)
1367		return 0;
1368
1369	mld = (struct mld_msg *)icmp6_hdr(skb);
1370	group = &mld->mld_mca;
1371	group_type = ipv6_addr_type(group);
1372
1373	if (group_type != IPV6_ADDR_ANY &&
1374	    !(group_type&IPV6_ADDR_MULTICAST))
1375		return -EINVAL;
1376
1377	if (len < MLD_V1_QUERY_LEN) {
1378		return -EINVAL;
1379	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1380		err = mld_process_v1(idev, mld, &max_delay,
1381				     len == MLD_V1_QUERY_LEN);
1382		if (err < 0)
1383			return err;
1384	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
1385		int srcs_offset = sizeof(struct mld2_query) -
1386				  sizeof(struct icmp6hdr);
1387
1388		if (!pskb_may_pull(skb, srcs_offset))
1389			return -EINVAL;
1390
1391		mlh2 = (struct mld2_query *)skb_transport_header(skb);
1392
1393		err = mld_process_v2(idev, mlh2, &max_delay);
1394		if (err < 0)
1395			return err;
1396
1397		if (group_type == IPV6_ADDR_ANY) { /* general query */
1398			if (mlh2->mld2q_nsrcs)
1399				return -EINVAL; /* no sources allowed */
1400
1401			mld_gq_start_timer(idev);
1402			return 0;
1403		}
1404		/* mark sources to include, if group & source-specific */
1405		if (mlh2->mld2q_nsrcs != 0) {
1406			if (!pskb_may_pull(skb, srcs_offset +
1407			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1408				return -EINVAL;
1409
1410			mlh2 = (struct mld2_query *)skb_transport_header(skb);
1411			mark = 1;
1412		}
1413	} else {
1414		return -EINVAL;
1415	}
1416
1417	read_lock_bh(&idev->lock);
1418	if (group_type == IPV6_ADDR_ANY) {
1419		for (ma = idev->mc_list; ma; ma = ma->next) {
1420			spin_lock_bh(&ma->mca_lock);
1421			igmp6_group_queried(ma, max_delay);
1422			spin_unlock_bh(&ma->mca_lock);
1423		}
1424	} else {
1425		for (ma = idev->mc_list; ma; ma = ma->next) {
1426			if (!ipv6_addr_equal(group, &ma->mca_addr))
1427				continue;
1428			spin_lock_bh(&ma->mca_lock);
1429			if (ma->mca_flags & MAF_TIMER_RUNNING) {
1430				/* gsquery <- gsquery && mark */
1431				if (!mark)
1432					ma->mca_flags &= ~MAF_GSQUERY;
1433			} else {
1434				/* gsquery <- mark */
1435				if (mark)
1436					ma->mca_flags |= MAF_GSQUERY;
1437				else
1438					ma->mca_flags &= ~MAF_GSQUERY;
1439			}
1440			if (!(ma->mca_flags & MAF_GSQUERY) ||
1441			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1442				igmp6_group_queried(ma, max_delay);
1443			spin_unlock_bh(&ma->mca_lock);
1444			break;
1445		}
1446	}
1447	read_unlock_bh(&idev->lock);
1448
1449	return 0;
1450}
1451
1452/* called with rcu_read_lock() */
1453int igmp6_event_report(struct sk_buff *skb)
1454{
1455	struct ifmcaddr6 *ma;
1456	struct inet6_dev *idev;
1457	struct mld_msg *mld;
1458	int addr_type;
1459
1460	/* Our own report looped back. Ignore it. */
1461	if (skb->pkt_type == PACKET_LOOPBACK)
1462		return 0;
1463
1464	/* send our report if the MC router may not have heard this report */
1465	if (skb->pkt_type != PACKET_MULTICAST &&
1466	    skb->pkt_type != PACKET_BROADCAST)
1467		return 0;
1468
1469	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1470		return -EINVAL;
1471
1472	mld = (struct mld_msg *)icmp6_hdr(skb);
1473
 1474	/* Drop reports whose source is not link-local */
1475	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1476	if (addr_type != IPV6_ADDR_ANY &&
1477	    !(addr_type&IPV6_ADDR_LINKLOCAL))
1478		return -EINVAL;
1479
1480	idev = __in6_dev_get(skb->dev);
1481	if (!idev)
1482		return -ENODEV;
1483
1484	/*
1485	 *	Cancel the timer for this group
1486	 */
1487
1488	read_lock_bh(&idev->lock);
1489	for (ma = idev->mc_list; ma; ma = ma->next) {
1490		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1491			spin_lock(&ma->mca_lock);
1492			if (del_timer(&ma->mca_timer))
1493				refcount_dec(&ma->mca_refcnt);
1494			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1495			spin_unlock(&ma->mca_lock);
1496			break;
1497		}
1498	}
1499	read_unlock_bh(&idev->lock);
1500	return 0;
1501}
1502
1503static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1504		  int gdeleted, int sdeleted)
1505{
1506	switch (type) {
1507	case MLD2_MODE_IS_INCLUDE:
1508	case MLD2_MODE_IS_EXCLUDE:
1509		if (gdeleted || sdeleted)
1510			return false;
1511		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1512			if (pmc->mca_sfmode == MCAST_INCLUDE)
1513				return true;
1514			/* don't include if this source is excluded
1515			 * in all filters
1516			 */
1517			if (psf->sf_count[MCAST_INCLUDE])
1518				return type == MLD2_MODE_IS_INCLUDE;
1519			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1520				psf->sf_count[MCAST_EXCLUDE];
1521		}
1522		return false;
1523	case MLD2_CHANGE_TO_INCLUDE:
1524		if (gdeleted || sdeleted)
1525			return false;
1526		return psf->sf_count[MCAST_INCLUDE] != 0;
1527	case MLD2_CHANGE_TO_EXCLUDE:
1528		if (gdeleted || sdeleted)
1529			return false;
1530		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1531		    psf->sf_count[MCAST_INCLUDE])
1532			return false;
1533		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1534			psf->sf_count[MCAST_EXCLUDE];
1535	case MLD2_ALLOW_NEW_SOURCES:
1536		if (gdeleted || !psf->sf_crcount)
1537			return false;
1538		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1539	case MLD2_BLOCK_OLD_SOURCES:
1540		if (pmc->mca_sfmode == MCAST_INCLUDE)
1541			return gdeleted || (psf->sf_crcount && sdeleted);
1542		return psf->sf_crcount && !gdeleted && !sdeleted;
1543	}
1544	return false;
1545}
1546
1547static int
1548mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1549{
1550	struct ip6_sf_list *psf;
1551	int scount = 0;
1552
1553	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1554		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1555			continue;
1556		scount++;
1557	}
1558	return scount;
1559}
1560
1561static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1562		       struct net_device *dev,
1563		       const struct in6_addr *saddr,
1564		       const struct in6_addr *daddr,
1565		       int proto, int len)
1566{
1567	struct ipv6hdr *hdr;
1568
1569	skb->protocol = htons(ETH_P_IPV6);
1570	skb->dev = dev;
1571
1572	skb_reset_network_header(skb);
1573	skb_put(skb, sizeof(struct ipv6hdr));
1574	hdr = ipv6_hdr(skb);
1575
1576	ip6_flow_hdr(hdr, 0, 0);
1577
1578	hdr->payload_len = htons(len);
1579	hdr->nexthdr = proto;
1580	hdr->hop_limit = inet6_sk(sk)->hop_limit;
1581
1582	hdr->saddr = *saddr;
1583	hdr->daddr = *daddr;
1584}
1585
1586static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1587{
1588	struct net_device *dev = idev->dev;
1589	struct net *net = dev_net(dev);
1590	struct sock *sk = net->ipv6.igmp_sk;
1591	struct sk_buff *skb;
1592	struct mld2_report *pmr;
1593	struct in6_addr addr_buf;
1594	const struct in6_addr *saddr;
1595	int hlen = LL_RESERVED_SPACE(dev);
1596	int tlen = dev->needed_tailroom;
1597	unsigned int size = mtu + hlen + tlen;
1598	int err;
1599	u8 ra[8] = { IPPROTO_ICMPV6, 0,
1600		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
1601		     IPV6_TLV_PADN, 0 };
1602
1603	/* we assume size > sizeof(ra) here */
1604	/* limit our allocations to order-0 page */
1605	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1606	skb = sock_alloc_send_skb(sk, size, 1, &err);
1607
1608	if (!skb)
1609		return NULL;
1610
1611	skb->priority = TC_PRIO_CONTROL;
1612	skb_reserve(skb, hlen);
1613	skb_tailroom_reserve(skb, mtu, tlen);
1614
1615	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1616		/* <draft-ietf-magma-mld-source-05.txt>:
1617		 * use unspecified address as the source address
1618		 * when a valid link-local address is not available.
1619		 */
1620		saddr = &in6addr_any;
1621	} else
1622		saddr = &addr_buf;
1623
1624	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1625
1626	skb_put_data(skb, ra, sizeof(ra));
1627
1628	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1629	skb_put(skb, sizeof(*pmr));
1630	pmr = (struct mld2_report *)skb_transport_header(skb);
1631	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1632	pmr->mld2r_resv1 = 0;
1633	pmr->mld2r_cksum = 0;
1634	pmr->mld2r_resv2 = 0;
1635	pmr->mld2r_ngrec = 0;
1636	return skb;
1637}
1638
1639static void mld_sendpack(struct sk_buff *skb)
1640{
1641	struct ipv6hdr *pip6 = ipv6_hdr(skb);
1642	struct mld2_report *pmr =
1643			      (struct mld2_report *)skb_transport_header(skb);
1644	int payload_len, mldlen;
1645	struct inet6_dev *idev;
1646	struct net *net = dev_net(skb->dev);
1647	int err;
1648	struct flowi6 fl6;
1649	struct dst_entry *dst;
1650
1651	rcu_read_lock();
1652	idev = __in6_dev_get(skb->dev);
1653	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1654
1655	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1656		sizeof(*pip6);
1657	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1658	pip6->payload_len = htons(payload_len);
1659
1660	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1661					   IPPROTO_ICMPV6,
1662					   csum_partial(skb_transport_header(skb),
1663							mldlen, 0));
1664
1665	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1666			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1667			 skb->dev->ifindex);
1668	dst = icmp6_dst_alloc(skb->dev, &fl6);
1669
1670	err = 0;
1671	if (IS_ERR(dst)) {
1672		err = PTR_ERR(dst);
1673		dst = NULL;
1674	}
1675	skb_dst_set(skb, dst);
1676	if (err)
1677		goto err_out;
1678
1679	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1680		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1681		      dst_output);
1682out:
1683	if (!err) {
1684		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1685		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1686	} else {
1687		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1688	}
1689
1690	rcu_read_unlock();
1691	return;
1692
1693err_out:
1694	kfree_skb(skb);
1695	goto out;
1696}
1697
1698static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1699{
1700	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1701}
1702
1703static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1704	int type, struct mld2_grec **ppgr, unsigned int mtu)
1705{
1706	struct mld2_report *pmr;
1707	struct mld2_grec *pgr;
1708
1709	if (!skb) {
1710		skb = mld_newpack(pmc->idev, mtu);
1711		if (!skb)
1712			return NULL;
1713	}
1714	pgr = skb_put(skb, sizeof(struct mld2_grec));
1715	pgr->grec_type = type;
1716	pgr->grec_auxwords = 0;
1717	pgr->grec_nsrcs = 0;
1718	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
1719	pmr = (struct mld2_report *)skb_transport_header(skb);
1720	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1721	*ppgr = pgr;
1722	return skb;
1723}
1724
1725#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
1726
1727static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1728	int type, int gdeleted, int sdeleted, int crsend)
1729{
1730	struct inet6_dev *idev = pmc->idev;
1731	struct net_device *dev = idev->dev;
1732	struct mld2_report *pmr;
1733	struct mld2_grec *pgr = NULL;
1734	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1735	int scount, stotal, first, isquery, truncate;
1736	unsigned int mtu;
1737
1738	if (pmc->mca_flags & MAF_NOREPORT)
1739		return skb;
1740
1741	mtu = READ_ONCE(dev->mtu);
1742	if (mtu < IPV6_MIN_MTU)
1743		return skb;
1744
1745	isquery = type == MLD2_MODE_IS_INCLUDE ||
1746		  type == MLD2_MODE_IS_EXCLUDE;
1747	truncate = type == MLD2_MODE_IS_EXCLUDE ||
1748		    type == MLD2_CHANGE_TO_EXCLUDE;
1749
1750	stotal = scount = 0;
1751
1752	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1753
1754	if (!*psf_list)
1755		goto empty_source;
1756
1757	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1758
1759	/* EX and TO_EX get a fresh packet, if needed */
1760	if (truncate) {
1761		if (pmr && pmr->mld2r_ngrec &&
1762		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1763			if (skb)
1764				mld_sendpack(skb);
1765			skb = mld_newpack(idev, mtu);
1766		}
1767	}
1768	first = 1;
1769	psf_prev = NULL;
1770	for (psf = *psf_list; psf; psf = psf_next) {
1771		struct in6_addr *psrc;
1772
1773		psf_next = psf->sf_next;
1774
1775		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1776			psf_prev = psf;
1777			continue;
1778		}
1779
1780		/* Based on RFC3810 6.1. Should not send source-list change
1781		 * records when there is a filter mode change.
1782		 */
1783		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1784		     (!gdeleted && pmc->mca_crcount)) &&
1785		    (type == MLD2_ALLOW_NEW_SOURCES ||
1786		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1787			goto decrease_sf_crcount;
1788
1789		/* clear marks on query responses */
1790		if (isquery)
1791			psf->sf_gsresp = 0;
1792
1793		if (AVAILABLE(skb) < sizeof(*psrc) +
1794		    first*sizeof(struct mld2_grec)) {
1795			if (truncate && !first)
1796				break;	 /* truncate these */
1797			if (pgr)
1798				pgr->grec_nsrcs = htons(scount);
1799			if (skb)
1800				mld_sendpack(skb);
1801			skb = mld_newpack(idev, mtu);
1802			first = 1;
1803			scount = 0;
1804		}
1805		if (first) {
1806			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1807			first = 0;
1808		}
1809		if (!skb)
1810			return NULL;
1811		psrc = skb_put(skb, sizeof(*psrc));
1812		*psrc = psf->sf_addr;
1813		scount++; stotal++;
1814		if ((type == MLD2_ALLOW_NEW_SOURCES ||
1815		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1816decrease_sf_crcount:
1817			psf->sf_crcount--;
1818			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1819				if (psf_prev)
1820					psf_prev->sf_next = psf->sf_next;
1821				else
1822					*psf_list = psf->sf_next;
1823				kfree(psf);
1824				continue;
1825			}
1826		}
1827		psf_prev = psf;
1828	}
1829
1830empty_source:
1831	if (!stotal) {
1832		if (type == MLD2_ALLOW_NEW_SOURCES ||
1833		    type == MLD2_BLOCK_OLD_SOURCES)
1834			return skb;
1835		if (pmc->mca_crcount || isquery || crsend) {
1836			/* make sure we have room for group header */
1837			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1838				mld_sendpack(skb);
1839				skb = NULL; /* add_grhead will get a new one */
1840			}
1841			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1842		}
1843	}
1844	if (pgr)
1845		pgr->grec_nsrcs = htons(scount);
1846
1847	if (isquery)
1848		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
1849	return skb;
1850}
1851
1852static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1853{
1854	struct sk_buff *skb = NULL;
1855	int type;
1856
1857	read_lock_bh(&idev->lock);
1858	if (!pmc) {
1859		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1860			if (pmc->mca_flags & MAF_NOREPORT)
1861				continue;
1862			spin_lock_bh(&pmc->mca_lock);
1863			if (pmc->mca_sfcount[MCAST_EXCLUDE])
1864				type = MLD2_MODE_IS_EXCLUDE;
1865			else
1866				type = MLD2_MODE_IS_INCLUDE;
1867			skb = add_grec(skb, pmc, type, 0, 0, 0);
1868			spin_unlock_bh(&pmc->mca_lock);
1869		}
1870	} else {
1871		spin_lock_bh(&pmc->mca_lock);
1872		if (pmc->mca_sfcount[MCAST_EXCLUDE])
1873			type = MLD2_MODE_IS_EXCLUDE;
1874		else
1875			type = MLD2_MODE_IS_INCLUDE;
1876		skb = add_grec(skb, pmc, type, 0, 0, 0);
1877		spin_unlock_bh(&pmc->mca_lock);
1878	}
1879	read_unlock_bh(&idev->lock);
1880	if (skb)
1881		mld_sendpack(skb);
1882}
1883
1884/*
1885 * remove zero-count source records from a source filter list
1886 */
1887static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1888{
1889	struct ip6_sf_list *psf_prev, *psf_next, *psf;
1890
1891	psf_prev = NULL;
1892	for (psf = *ppsf; psf; psf = psf_next) {
1893		psf_next = psf->sf_next;
1894		if (psf->sf_crcount == 0) {
1895			if (psf_prev)
1896				psf_prev->sf_next = psf->sf_next;
1897			else
1898				*ppsf = psf->sf_next;
1899			kfree(psf);
1900		} else
1901			psf_prev = psf;
1902	}
1903}
1904
1905static void mld_send_cr(struct inet6_dev *idev)
1906{
1907	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1908	struct sk_buff *skb = NULL;
1909	int type, dtype;
1910
1911	read_lock_bh(&idev->lock);
1912	spin_lock(&idev->mc_lock);
1913
1914	/* deleted MCA's */
1915	pmc_prev = NULL;
1916	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
1917		pmc_next = pmc->next;
1918		if (pmc->mca_sfmode == MCAST_INCLUDE) {
1919			type = MLD2_BLOCK_OLD_SOURCES;
1920			dtype = MLD2_BLOCK_OLD_SOURCES;
1921			skb = add_grec(skb, pmc, type, 1, 0, 0);
1922			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
1923		}
1924		if (pmc->mca_crcount) {
1925			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1926				type = MLD2_CHANGE_TO_INCLUDE;
1927				skb = add_grec(skb, pmc, type, 1, 0, 0);
1928			}
1929			pmc->mca_crcount--;
1930			if (pmc->mca_crcount == 0) {
1931				mld_clear_zeros(&pmc->mca_tomb);
1932				mld_clear_zeros(&pmc->mca_sources);
1933			}
1934		}
1935		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1936		    !pmc->mca_sources) {
1937			if (pmc_prev)
1938				pmc_prev->next = pmc_next;
1939			else
1940				idev->mc_tomb = pmc_next;
1941			in6_dev_put(pmc->idev);
1942			kfree(pmc);
1943		} else
1944			pmc_prev = pmc;
1945	}
1946	spin_unlock(&idev->mc_lock);
1947
1948	/* change recs */
1949	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1950		spin_lock_bh(&pmc->mca_lock);
1951		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1952			type = MLD2_BLOCK_OLD_SOURCES;
1953			dtype = MLD2_ALLOW_NEW_SOURCES;
1954		} else {
1955			type = MLD2_ALLOW_NEW_SOURCES;
1956			dtype = MLD2_BLOCK_OLD_SOURCES;
1957		}
1958		skb = add_grec(skb, pmc, type, 0, 0, 0);
1959		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
1960
1961		/* filter mode changes */
1962		if (pmc->mca_crcount) {
1963			if (pmc->mca_sfmode == MCAST_EXCLUDE)
1964				type = MLD2_CHANGE_TO_EXCLUDE;
1965			else
1966				type = MLD2_CHANGE_TO_INCLUDE;
1967			skb = add_grec(skb, pmc, type, 0, 0, 0);
1968			pmc->mca_crcount--;
1969		}
1970		spin_unlock_bh(&pmc->mca_lock);
1971	}
1972	read_unlock_bh(&idev->lock);
1973	if (!skb)
1974		return;
1975	(void) mld_sendpack(skb);
1976}
1977
1978static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1979{
1980	struct net *net = dev_net(dev);
1981	struct sock *sk = net->ipv6.igmp_sk;
 
1982	struct inet6_dev *idev;
1983	struct sk_buff *skb;
1984	struct mld_msg *hdr;
1985	const struct in6_addr *snd_addr, *saddr;
1986	struct in6_addr addr_buf;
1987	int hlen = LL_RESERVED_SPACE(dev);
1988	int tlen = dev->needed_tailroom;
1989	int err, len, payload_len, full_len;
1990	u8 ra[8] = { IPPROTO_ICMPV6, 0,
1991		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
1992		     IPV6_TLV_PADN, 0 };
1993	struct flowi6 fl6;
1994	struct dst_entry *dst;
 
 
 
1995
1996	if (type == ICMPV6_MGM_REDUCTION)
1997		snd_addr = &in6addr_linklocal_allrouters;
1998	else
1999		snd_addr = addr;
2000
2001	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2002	payload_len = len + sizeof(ra);
2003	full_len = sizeof(struct ipv6hdr) + payload_len;
2004
2005	rcu_read_lock();
2006	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
2007		      IPSTATS_MIB_OUT, full_len);
2008	rcu_read_unlock();
2009
2010	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
2011
 
 
 
2012	if (!skb) {
2013		rcu_read_lock();
2014		IP6_INC_STATS(net, __in6_dev_get(dev),
2015			      IPSTATS_MIB_OUTDISCARDS);
2016		rcu_read_unlock();
2017		return;
2018	}
 
 
 
2019	skb->priority = TC_PRIO_CONTROL;
2020	skb_reserve(skb, hlen);
2021
2022	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2023		/* <draft-ietf-magma-mld-source-05.txt>:
2024		 * use unspecified address as the source address
2025		 * when a valid link-local address is not available.
2026		 */
2027		saddr = &in6addr_any;
2028	} else
2029		saddr = &addr_buf;
2030
2031	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2032
2033	skb_put_data(skb, ra, sizeof(ra));
2034
2035	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2036	hdr->mld_type = type;
2037	hdr->mld_mca = *addr;
2038
2039	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2040					 IPPROTO_ICMPV6,
2041					 csum_partial(hdr, len, 0));
2042
2043	rcu_read_lock();
2044	idev = __in6_dev_get(skb->dev);
2045
2046	icmpv6_flow_init(sk, &fl6, type,
2047			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2048			 skb->dev->ifindex);
2049	dst = icmp6_dst_alloc(skb->dev, &fl6);
2050	if (IS_ERR(dst)) {
2051		err = PTR_ERR(dst);
2052		goto err_out;
2053	}
2054
2055	skb_dst_set(skb, dst);
2056	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2057		      net, sk, skb, NULL, skb->dev,
2058		      dst_output);
2059out:
2060	if (!err) {
2061		ICMP6MSGOUT_INC_STATS(net, idev, type);
2062		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2063	} else
2064		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2065
2066	rcu_read_unlock();
2067	return;
2068
2069err_out:
2070	kfree_skb(skb);
2071	goto out;
2072}
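
The 8-byte ra[] initializer above is the Hop-by-Hop extension header carrying the Router Alert option that MLD messages must include. Below is a minimal userspace sketch (not part of this file) that lays out the same bytes plus an MLDv1 report body, so the wire format built by igmp6_send() is easy to see; the group address is an illustrative assumption and the program only prints the buffer (actually transmitting it would need a raw ICMPv6 socket, ancillary IPV6_HOPOPTS data and CAP_NET_RAW).

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct mldv1_msg {			/* mirrors struct mld_msg: icmp6hdr + group */
	uint8_t  type, code;
	uint16_t cksum;
	uint16_t max_delay, reserved;
	struct in6_addr mca;
};

int main(void)
{
	/* Next Header = ICMPv6 (58), Hdr Ext Len = 0 (8 bytes total),
	 * Router Alert (type 5, length 2, value 0 = MLD), PadN (type 1, length 0)
	 */
	uint8_t ra[8] = { 58, 0, 5, 2, 0, 0, 1, 0 };
	struct mldv1_msg msg = { .type = 131 };	/* ICMPV6_MGM_REPORT */
	unsigned int i;

	inet_pton(AF_INET6, "ff02::1:ff00:1", &msg.mca);	/* assumed group */
	for (i = 0; i < sizeof(ra); i++)
		printf("%02x ", (unsigned int)ra[i]);
	printf("| type=%u group=ff02::1:ff00:1 (checksum left to the sender)\n",
	       (unsigned int)msg.type);
	return 0;
}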
2073
 
2074static void mld_send_initial_cr(struct inet6_dev *idev)
2075{
2076	struct sk_buff *skb;
2077	struct ifmcaddr6 *pmc;
2078	int type;
2079
2080	if (mld_in_v1_mode(idev))
2081		return;
2082
2083	skb = NULL;
2084	read_lock_bh(&idev->lock);
2085	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2086		spin_lock_bh(&pmc->mca_lock);
2087		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2088			type = MLD2_CHANGE_TO_EXCLUDE;
2089		else
2090			type = MLD2_ALLOW_NEW_SOURCES;
2091		skb = add_grec(skb, pmc, type, 0, 0, 1);
2092		spin_unlock_bh(&pmc->mca_lock);
2093	}
2094	read_unlock_bh(&idev->lock);
2095	if (skb)
2096		mld_sendpack(skb);
2097}
2098
2099void ipv6_mc_dad_complete(struct inet6_dev *idev)
2100{
 
2101	idev->mc_dad_count = idev->mc_qrv;
2102	if (idev->mc_dad_count) {
2103		mld_send_initial_cr(idev);
2104		idev->mc_dad_count--;
2105		if (idev->mc_dad_count)
2106			mld_dad_start_timer(idev,
2107					    unsolicited_report_interval(idev));
2108	}
 
2109}
2110
2111static void mld_dad_timer_expire(struct timer_list *t)
2112{
2113	struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
2114
 
 
2115	mld_send_initial_cr(idev);
2116	if (idev->mc_dad_count) {
2117		idev->mc_dad_count--;
2118		if (idev->mc_dad_count)
2119			mld_dad_start_timer(idev,
2120					    unsolicited_report_interval(idev));
2121	}
 
2122	in6_dev_put(idev);
2123}
2124
 
2125static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2126	const struct in6_addr *psfsrc)
2127{
2128	struct ip6_sf_list *psf, *psf_prev;
2129	int rv = 0;
2130
2131	psf_prev = NULL;
2132	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2133		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2134			break;
2135		psf_prev = psf;
2136	}
2137	if (!psf || psf->sf_count[sfmode] == 0) {
2138		/* source filter not found, or count wrong => bug */
2139		return -ESRCH;
2140	}
2141	psf->sf_count[sfmode]--;
2142	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2143		struct inet6_dev *idev = pmc->idev;
2144
2145		/* no more filters for this source */
2146		if (psf_prev)
2147			psf_prev->sf_next = psf->sf_next;
 
2148		else
2149			pmc->mca_sources = psf->sf_next;
 
 
2150		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2151		    !mld_in_v1_mode(idev)) {
2152			psf->sf_crcount = idev->mc_qrv;
2153			psf->sf_next = pmc->mca_tomb;
2154			pmc->mca_tomb = psf;
 
2155			rv = 1;
2156		} else
2157			kfree(psf);
 
2158	}
2159	return rv;
2160}
2161
 
2162static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2163			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2164			  int delta)
2165{
2166	struct ifmcaddr6 *pmc;
2167	int	changerec = 0;
2168	int	i, err;
2169
2170	if (!idev)
2171		return -ENODEV;
2172	read_lock_bh(&idev->lock);
2173	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2174		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2175			break;
2176	}
2177	if (!pmc) {
2178		/* MCA not found - this indicates a bug in the caller */
2179		read_unlock_bh(&idev->lock);
2180		return -ESRCH;
2181	}
2182	spin_lock_bh(&pmc->mca_lock);
2183	sf_markstate(pmc);
2184	if (!delta) {
2185		if (!pmc->mca_sfcount[sfmode]) {
2186			spin_unlock_bh(&pmc->mca_lock);
2187			read_unlock_bh(&idev->lock);
2188			return -EINVAL;
2189		}
2190		pmc->mca_sfcount[sfmode]--;
2191	}
2192	err = 0;
2193	for (i = 0; i < sfcount; i++) {
2194		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2195
2196		changerec |= rv > 0;
2197		if (!err && rv < 0)
2198			err = rv;
2199	}
2200	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2201	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2202	    pmc->mca_sfcount[MCAST_INCLUDE]) {
2203		struct ip6_sf_list *psf;
2204
2205		/* filter mode change */
2206		pmc->mca_sfmode = MCAST_INCLUDE;
2207		pmc->mca_crcount = idev->mc_qrv;
2208		idev->mc_ifc_count = pmc->mca_crcount;
2209		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2210			psf->sf_crcount = 0;
2211		mld_ifc_event(pmc->idev);
2212	} else if (sf_setstate(pmc) || changerec)
2213		mld_ifc_event(pmc->idev);
2214	spin_unlock_bh(&pmc->mca_lock);
2215	read_unlock_bh(&idev->lock);
2216	return err;
2217}
2218
2219/*
2220 * Add multicast single-source filter to the interface list
 
2221 */
2222static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2223	const struct in6_addr *psfsrc)
2224{
2225	struct ip6_sf_list *psf, *psf_prev;
2226
2227	psf_prev = NULL;
2228	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2229		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2230			break;
2231		psf_prev = psf;
2232	}
2233	if (!psf) {
2234		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2235		if (!psf)
2236			return -ENOBUFS;
2237
2238		psf->sf_addr = *psfsrc;
2239		if (psf_prev) {
2240			psf_prev->sf_next = psf;
2241		} else
2242			pmc->mca_sources = psf;
 
2243	}
2244	psf->sf_count[sfmode]++;
2245	return 0;
2246}
2247
 
2248static void sf_markstate(struct ifmcaddr6 *pmc)
2249{
2250	struct ip6_sf_list *psf;
2251	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2252
2253	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2254		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2255			psf->sf_oldin = mca_xcount ==
2256				psf->sf_count[MCAST_EXCLUDE] &&
2257				!psf->sf_count[MCAST_INCLUDE];
2258		} else
2259			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
 
 
2260}
2261
 
2262static int sf_setstate(struct ifmcaddr6 *pmc)
2263{
2264	struct ip6_sf_list *psf, *dpsf;
2265	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2266	int qrv = pmc->idev->mc_qrv;
2267	int new_in, rv;
2268
2269	rv = 0;
2270	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2271		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2272			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2273				!psf->sf_count[MCAST_INCLUDE];
2274		} else
2275			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2276		if (new_in) {
2277			if (!psf->sf_oldin) {
2278				struct ip6_sf_list *prev = NULL;
2279
2280				for (dpsf = pmc->mca_tomb; dpsf;
2281				     dpsf = dpsf->sf_next) {
2282					if (ipv6_addr_equal(&dpsf->sf_addr,
2283					    &psf->sf_addr))
2284						break;
2285					prev = dpsf;
2286				}
2287				if (dpsf) {
2288					if (prev)
2289						prev->sf_next = dpsf->sf_next;
 
 
2290					else
2291						pmc->mca_tomb = dpsf->sf_next;
2292					kfree(dpsf);
 
 
2293				}
2294				psf->sf_crcount = qrv;
2295				rv++;
2296			}
2297		} else if (psf->sf_oldin) {
2298			psf->sf_crcount = 0;
2299			/*
2300			 * add or update "delete" records if an active filter
2301			 * is now inactive
2302			 */
2303			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
 
2304				if (ipv6_addr_equal(&dpsf->sf_addr,
2305				    &psf->sf_addr))
2306					break;
2307			if (!dpsf) {
2308				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2309				if (!dpsf)
2310					continue;
2311				*dpsf = *psf;
2312				/* pmc->mca_lock held by callers */
2313				dpsf->sf_next = pmc->mca_tomb;
2314				pmc->mca_tomb = dpsf;
2315			}
2316			dpsf->sf_crcount = qrv;
2317			rv++;
2318		}
2319	}
2320	return rv;
2321}
2322
2323/*
2324 * Add multicast source filter list to the interface list
 
2325 */
2326static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2327			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2328			  int delta)
2329{
2330	struct ifmcaddr6 *pmc;
2331	int	isexclude;
2332	int	i, err;
2333
2334	if (!idev)
2335		return -ENODEV;
2336	read_lock_bh(&idev->lock);
2337	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2338		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2339			break;
2340	}
2341	if (!pmc) {
2342		/* MCA not found - this indicates a bug in the caller */
2343		read_unlock_bh(&idev->lock);
2344		return -ESRCH;
2345	}
2346	spin_lock_bh(&pmc->mca_lock);
2347
2348	sf_markstate(pmc);
2349	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2350	if (!delta)
2351		pmc->mca_sfcount[sfmode]++;
2352	err = 0;
2353	for (i = 0; i < sfcount; i++) {
2354		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2355		if (err)
2356			break;
2357	}
2358	if (err) {
2359		int j;
2360
2361		if (!delta)
2362			pmc->mca_sfcount[sfmode]--;
2363		for (j = 0; j < i; j++)
2364			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2365	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2366		struct ip6_sf_list *psf;
2367
2368		/* filter mode change */
2369		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2370			pmc->mca_sfmode = MCAST_EXCLUDE;
2371		else if (pmc->mca_sfcount[MCAST_INCLUDE])
2372			pmc->mca_sfmode = MCAST_INCLUDE;
2373		/* else no filters; keep old mode for reports */
2374
2375		pmc->mca_crcount = idev->mc_qrv;
2376		idev->mc_ifc_count = pmc->mca_crcount;
2377		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2378			psf->sf_crcount = 0;
2379		mld_ifc_event(idev);
2380	} else if (sf_setstate(pmc))
2381		mld_ifc_event(idev);
2382	spin_unlock_bh(&pmc->mca_lock);
2383	read_unlock_bh(&idev->lock);
2384	return err;
2385}
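
ip6_mc_add_src() above is reached from userspace through the source-specific join options: MCAST_JOIN_SOURCE_GROUP ends up here via ip6_mc_source() with an INCLUDE-mode filter. A hedged sketch of that path, assuming interface "eth0", SSM group ff3e::4321:1234 and source 2001:db8::1 purely for illustration.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>

int main(void)
{
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = if_nametoindex("eth0");	/* assumed interface */
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff3e::4321:1234", &grp->sin6_addr);
	src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);

	/* INCLUDE-mode (SSM) join: reaches ip6_mc_add_src() via ip6_mc_source() */
	if (setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
		       &gsr, sizeof(gsr)) < 0)
		perror("MCAST_JOIN_SOURCE_GROUP");
	return 0;
}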
2386
 
2387static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2388{
2389	struct ip6_sf_list *psf, *nextpsf;
2390
2391	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
2392		nextpsf = psf->sf_next;
2393		kfree(psf);
2394	}
2395	pmc->mca_tomb = NULL;
2396	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
2397		nextpsf = psf->sf_next;
2398		kfree(psf);
2399	}
2400	pmc->mca_sources = NULL;
2401	pmc->mca_sfmode = MCAST_EXCLUDE;
2402	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2403	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2404}
2405
2406
2407static void igmp6_join_group(struct ifmcaddr6 *ma)
2408{
2409	unsigned long delay;
2410
2411	if (ma->mca_flags & MAF_NOREPORT)
2412		return;
2413
2414	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2415
2416	delay = prandom_u32() % unsolicited_report_interval(ma->idev);
2417
2418	spin_lock_bh(&ma->mca_lock);
2419	if (del_timer(&ma->mca_timer)) {
2420		refcount_dec(&ma->mca_refcnt);
2421		delay = ma->mca_timer.expires - jiffies;
2422	}
2423
2424	if (!mod_timer(&ma->mca_timer, jiffies + delay))
2425		refcount_inc(&ma->mca_refcnt);
2426	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2427	spin_unlock_bh(&ma->mca_lock);
2428}
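
igmp6_join_group() sends the unsolicited MLDv1 report once a reportable group becomes active on a device. The usual userspace trigger is a plain any-source join; a small sketch follows, with the interface name and group address chosen only for illustration.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
	struct ipv6_mreq mreq;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&mreq, 0, sizeof(mreq));
	mreq.ipv6mr_interface = if_nametoindex("eth0");	/* assumed name */
	inet_pton(AF_INET6, "ff02::db8:1", &mreq.ipv6mr_multiaddr);

	/* join: the kernel adds the MCA and, if it is reportable, schedules a report */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IPV6_JOIN_GROUP");

	/* ... receive traffic here ... */

	/* leave: may trigger igmp6_leave_group() and, in MLDv1 mode, a Done message */
	setsockopt(fd, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
	close(fd);
	return 0;
}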
2429
2430static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2431			    struct inet6_dev *idev)
2432{
 
2433	int err;
2434
2435	write_lock_bh(&iml->sflock);
2436	if (!iml->sflist) {
2437		/* any-source empty exclude case */
2438		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2439	} else {
2440		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2441				iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2442		sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2443		iml->sflist = NULL;
 
 
2444	}
2445	write_unlock_bh(&iml->sflock);
 
 
 
2446	return err;
2447}
2448
 
2449static void igmp6_leave_group(struct ifmcaddr6 *ma)
2450{
2451	if (mld_in_v1_mode(ma->idev)) {
2452		if (ma->mca_flags & MAF_LAST_REPORTER)
2453			igmp6_send(&ma->mca_addr, ma->idev->dev,
2454				ICMPV6_MGM_REDUCTION);
 
2455	} else {
2456		mld_add_delrec(ma->idev, ma);
2457		mld_ifc_event(ma->idev);
2458	}
2459}
2460
2461static void mld_gq_timer_expire(struct timer_list *t)
2462{
2463	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
 
 
2464
2465	idev->mc_gq_running = 0;
2466	mld_send_report(idev, NULL);
 
 
 
2467	in6_dev_put(idev);
2468}
2469
2470static void mld_ifc_timer_expire(struct timer_list *t)
2471{
2472	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
 
 
2473
 
2474	mld_send_cr(idev);
 
2475	if (idev->mc_ifc_count) {
2476		idev->mc_ifc_count--;
2477		if (idev->mc_ifc_count)
2478			mld_ifc_start_timer(idev,
2479					    unsolicited_report_interval(idev));
2480	}
 
2481	in6_dev_put(idev);
2482}
2483
 
2484static void mld_ifc_event(struct inet6_dev *idev)
2485{
2486	if (mld_in_v1_mode(idev))
2487		return;
 
2488	idev->mc_ifc_count = idev->mc_qrv;
2489	mld_ifc_start_timer(idev, 1);
2490}
2491
2492static void igmp6_timer_handler(struct timer_list *t)
2493{
2494	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
 
2495
 
2496	if (mld_in_v1_mode(ma->idev))
2497		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2498	else
2499		mld_send_report(ma->idev, ma);
2500
2501	spin_lock(&ma->mca_lock);
2502	ma->mca_flags |=  MAF_LAST_REPORTER;
2503	ma->mca_flags &= ~MAF_TIMER_RUNNING;
2504	spin_unlock(&ma->mca_lock);
 
2505	ma_put(ma);
2506}
2507
2508/* Device changing type */
2509
2510void ipv6_mc_unmap(struct inet6_dev *idev)
2511{
2512	struct ifmcaddr6 *i;
2513
2514	/* Withdraw the multicast list while the device changes type */
2515
2516	read_lock_bh(&idev->lock);
2517	for (i = idev->mc_list; i; i = i->next)
2518		igmp6_group_dropped(i);
2519	read_unlock_bh(&idev->lock);
2520}
2521
2522void ipv6_mc_remap(struct inet6_dev *idev)
2523{
2524	ipv6_mc_up(idev);
2525}
2526
2527/* Device going down */
2528
2529void ipv6_mc_down(struct inet6_dev *idev)
2530{
2531	struct ifmcaddr6 *i;
2532
 
2533	/* Withdraw multicast list */
2534
2535	read_lock_bh(&idev->lock);
2536
2537	for (i = idev->mc_list; i; i = i->next)
2538		igmp6_group_dropped(i);
 
2539
2540	/* Stop the timers after the group drop, or mld_ifc_event()
2541	 * would start them again.
2542	 */
2543	mld_ifc_stop_timer(idev);
2544	mld_gq_stop_timer(idev);
2545	mld_dad_stop_timer(idev);
2546	read_unlock_bh(&idev->lock);
2547}
2548
2549static void ipv6_mc_reset(struct inet6_dev *idev)
2550{
2551	idev->mc_qrv = sysctl_mld_qrv;
2552	idev->mc_qi = MLD_QI_DEFAULT;
2553	idev->mc_qri = MLD_QRI_DEFAULT;
2554	idev->mc_v1_seen = 0;
2555	idev->mc_maxdelay = unsolicited_report_interval(idev);
2556}
2557
2558/* Device going up */
2559
2560void ipv6_mc_up(struct inet6_dev *idev)
2561{
2562	struct ifmcaddr6 *i;
2563
2564	/* Install multicast list, except for all-nodes (already installed) */
2565
2566	read_lock_bh(&idev->lock);
2567	ipv6_mc_reset(idev);
2568	for (i = idev->mc_list; i; i = i->next) {
 
2569		mld_del_delrec(idev, i);
2570		igmp6_group_added(i);
2571	}
2572	read_unlock_bh(&idev->lock);
2573}
2574
2575/* IPv6 device initialization. */
2576
2577void ipv6_mc_init_dev(struct inet6_dev *idev)
2578{
2579	write_lock_bh(&idev->lock);
2580	spin_lock_init(&idev->mc_lock);
2581	idev->mc_gq_running = 0;
2582	timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
2583	idev->mc_tomb = NULL;
2584	idev->mc_ifc_count = 0;
2585	timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
2586	timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
2587	ipv6_mc_reset(idev);
2588	write_unlock_bh(&idev->lock);
2589}
2590
2591/*
2592 *	Device is about to be destroyed: clean up.
2593 */
2594
2595void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2596{
2597	struct ifmcaddr6 *i;
2598
2599	/* Deactivate timers */
2600	ipv6_mc_down(idev);
 
2601	mld_clear_delrec(idev);
 
 
 
2602
2603	/* Delete all-nodes address. */
2604	/* We cannot call ipv6_dev_mc_dec() directly: our caller in
2605	 * addrconf.c has already NULL'd out dev->ip6_ptr, so in6_dev_get()
2606	 * would fail.
2607	 */
2608	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2609
2610	if (idev->cnf.forwarding)
2611		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2612
2613	write_lock_bh(&idev->lock);
2614	while ((i = idev->mc_list) != NULL) {
2615		idev->mc_list = i->next;
2616
2617		write_unlock_bh(&idev->lock);
2618		ip6_mc_clear_src(i);
2619		ma_put(i);
2620		write_lock_bh(&idev->lock);
2621	}
2622	write_unlock_bh(&idev->lock);
2623}
2624
2625static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2626{
2627	struct ifmcaddr6 *pmc;
2628
2629	ASSERT_RTNL();
2630
 
2631	if (mld_in_v1_mode(idev)) {
2632		read_lock_bh(&idev->lock);
2633		for (pmc = idev->mc_list; pmc; pmc = pmc->next)
2634			igmp6_join_group(pmc);
2635		read_unlock_bh(&idev->lock);
2636	} else
2637		mld_send_report(idev, NULL);
 
 
2638}
2639
2640static int ipv6_mc_netdev_event(struct notifier_block *this,
2641				unsigned long event,
2642				void *ptr)
2643{
2644	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2645	struct inet6_dev *idev = __in6_dev_get(dev);
2646
2647	switch (event) {
2648	case NETDEV_RESEND_IGMP:
2649		if (idev)
2650			ipv6_mc_rejoin_groups(idev);
2651		break;
2652	default:
2653		break;
2654	}
2655
2656	return NOTIFY_DONE;
2657}
2658
2659static struct notifier_block igmp6_netdev_notifier = {
2660	.notifier_call = ipv6_mc_netdev_event,
2661};
2662
2663#ifdef CONFIG_PROC_FS
2664struct igmp6_mc_iter_state {
2665	struct seq_net_private p;
2666	struct net_device *dev;
2667	struct inet6_dev *idev;
2668};
2669
2670#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
2671
2672static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2673{
2674	struct ifmcaddr6 *im = NULL;
2675	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2676	struct net *net = seq_file_net(seq);
2677
2678	state->idev = NULL;
2679	for_each_netdev_rcu(net, state->dev) {
2680		struct inet6_dev *idev;
2681		idev = __in6_dev_get(state->dev);
2682		if (!idev)
2683			continue;
2684		read_lock_bh(&idev->lock);
2685		im = idev->mc_list;
2686		if (im) {
2687			state->idev = idev;
2688			break;
2689		}
2690		read_unlock_bh(&idev->lock);
2691	}
2692	return im;
2693}
2694
2695static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2696{
2697	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2698
2699	im = im->next;
2700	while (!im) {
2701		if (likely(state->idev))
2702			read_unlock_bh(&state->idev->lock);
2703
2704		state->dev = next_net_device_rcu(state->dev);
2705		if (!state->dev) {
2706			state->idev = NULL;
2707			break;
2708		}
2709		state->idev = __in6_dev_get(state->dev);
2710		if (!state->idev)
2711			continue;
2712		read_lock_bh(&state->idev->lock);
2713		im = state->idev->mc_list;
2714	}
2715	return im;
2716}
2717
2718static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2719{
2720	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2721	if (im)
2722		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2723			--pos;
2724	return pos ? NULL : im;
2725}
2726
2727static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2728	__acquires(RCU)
2729{
2730	rcu_read_lock();
2731	return igmp6_mc_get_idx(seq, *pos);
2732}
2733
2734static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2735{
2736	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2737
2738	++*pos;
2739	return im;
2740}
2741
2742static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2743	__releases(RCU)
2744{
2745	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2746
2747	if (likely(state->idev)) {
2748		read_unlock_bh(&state->idev->lock);
2749		state->idev = NULL;
2750	}
2751	state->dev = NULL;
2752	rcu_read_unlock();
2753}
2754
2755static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2756{
2757	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2758	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2759
2760	seq_printf(seq,
2761		   "%-4d %-15s %pi6 %5d %08X %ld\n",
2762		   state->dev->ifindex, state->dev->name,
2763		   &im->mca_addr,
2764		   im->mca_users, im->mca_flags,
2765		   (im->mca_flags&MAF_TIMER_RUNNING) ?
2766		   jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2767	return 0;
2768}
2769
2770static const struct seq_operations igmp6_mc_seq_ops = {
2771	.start	=	igmp6_mc_seq_start,
2772	.next	=	igmp6_mc_seq_next,
2773	.stop	=	igmp6_mc_seq_stop,
2774	.show	=	igmp6_mc_seq_show,
2775};
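
The rows produced by igmp6_mc_seq_show() appear in /proc/net/igmp6 as "ifindex device group users flags timer", with the group printed as 32 hex digits by %pi6. A small reader sketch (parsing only; field widths assumed from the seq_printf() format above):

#include <stdio.h>

int main(void)
{
	char dev[16], group[33];
	unsigned int ifindex, users, flags;
	long timer;
	FILE *f = fopen("/proc/net/igmp6", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%u %15s %32s %u %x %ld",
		      &ifindex, dev, group, &users, &flags, &timer) == 6)
		printf("%s joined %s (users=%u flags=0x%x)\n",
		       dev, group, users, flags);
	fclose(f);
	return 0;
}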
2776
2777struct igmp6_mcf_iter_state {
2778	struct seq_net_private p;
2779	struct net_device *dev;
2780	struct inet6_dev *idev;
2781	struct ifmcaddr6 *im;
2782};
2783
2784#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
2785
2786static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2787{
2788	struct ip6_sf_list *psf = NULL;
2789	struct ifmcaddr6 *im = NULL;
2790	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2791	struct net *net = seq_file_net(seq);
2792
2793	state->idev = NULL;
2794	state->im = NULL;
2795	for_each_netdev_rcu(net, state->dev) {
2796		struct inet6_dev *idev;
2797		idev = __in6_dev_get(state->dev);
2798		if (unlikely(idev == NULL))
2799			continue;
2800		read_lock_bh(&idev->lock);
2801		im = idev->mc_list;
2802		if (likely(im)) {
2803			spin_lock_bh(&im->mca_lock);
2804			psf = im->mca_sources;
2805			if (likely(psf)) {
2806				state->im = im;
2807				state->idev = idev;
2808				break;
2809			}
2810			spin_unlock_bh(&im->mca_lock);
2811		}
2812		read_unlock_bh(&idev->lock);
2813	}
2814	return psf;
2815}
2816
2817static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2818{
2819	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2820
2821	psf = psf->sf_next;
2822	while (!psf) {
2823		spin_unlock_bh(&state->im->mca_lock);
2824		state->im = state->im->next;
2825		while (!state->im) {
2826			if (likely(state->idev))
2827				read_unlock_bh(&state->idev->lock);
2828
2829			state->dev = next_net_device_rcu(state->dev);
2830			if (!state->dev) {
2831				state->idev = NULL;
2832				goto out;
2833			}
2834			state->idev = __in6_dev_get(state->dev);
2835			if (!state->idev)
2836				continue;
2837			read_lock_bh(&state->idev->lock);
2838			state->im = state->idev->mc_list;
2839		}
2840		if (!state->im)
2841			break;
2842		spin_lock_bh(&state->im->mca_lock);
2843		psf = state->im->mca_sources;
2844	}
2845out:
2846	return psf;
2847}
2848
2849static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2850{
2851	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2852	if (psf)
2853		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2854			--pos;
2855	return pos ? NULL : psf;
2856}
2857
2858static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2859	__acquires(RCU)
2860{
2861	rcu_read_lock();
2862	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2863}
2864
2865static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2866{
2867	struct ip6_sf_list *psf;
2868	if (v == SEQ_START_TOKEN)
2869		psf = igmp6_mcf_get_first(seq);
2870	else
2871		psf = igmp6_mcf_get_next(seq, v);
2872	++*pos;
2873	return psf;
2874}
2875
2876static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2877	__releases(RCU)
2878{
2879	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2880	if (likely(state->im)) {
2881		spin_unlock_bh(&state->im->mca_lock);
2882		state->im = NULL;
2883	}
2884	if (likely(state->idev)) {
2885		read_unlock_bh(&state->idev->lock);
2886		state->idev = NULL;
2887	}
2888	state->dev = NULL;
2889	rcu_read_unlock();
2890}
2891
2892static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2893{
2894	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2895	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2896
2897	if (v == SEQ_START_TOKEN) {
2898		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
2899	} else {
2900		seq_printf(seq,
2901			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2902			   state->dev->ifindex, state->dev->name,
2903			   &state->im->mca_addr,
2904			   &psf->sf_addr,
2905			   psf->sf_count[MCAST_INCLUDE],
2906			   psf->sf_count[MCAST_EXCLUDE]);
2907	}
2908	return 0;
2909}
2910
2911static const struct seq_operations igmp6_mcf_seq_ops = {
2912	.start	=	igmp6_mcf_seq_start,
2913	.next	=	igmp6_mcf_seq_next,
2914	.stop	=	igmp6_mcf_seq_stop,
2915	.show	=	igmp6_mcf_seq_show,
2916};
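
igmp6_mcf_seq_show() adds the per-source counters under /proc/net/mcfilter6: one header row (the SEQ_START_TOKEN case), then "idx device group source INC EXC" lines. A companion sketch that skips the header and dumps each triple:

#include <stdio.h>

int main(void)
{
	char line[256], dev[16], group[33], src[33];
	unsigned int ifindex;
	unsigned long inc, exc;
	FILE *f = fopen("/proc/net/mcfilter6", "r");

	if (!f)
		return 1;
	if (!fgets(line, sizeof(line), f)) {	/* header row */
		fclose(f);
		return 1;
	}
	while (fscanf(f, "%u %15s %32s %32s %lu %lu",
		      &ifindex, dev, group, src, &inc, &exc) == 6)
		printf("%s %s src %s INC=%lu EXC=%lu\n",
		       dev, group, src, inc, exc);
	fclose(f);
	return 0;
}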
2917
2918static int __net_init igmp6_proc_init(struct net *net)
2919{
2920	int err;
2921
2922	err = -ENOMEM;
2923	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
2924			sizeof(struct igmp6_mc_iter_state)))
2925		goto out;
2926	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
2927			&igmp6_mcf_seq_ops,
2928			sizeof(struct igmp6_mcf_iter_state)))
2929		goto out_proc_net_igmp6;
2930
2931	err = 0;
2932out:
2933	return err;
2934
2935out_proc_net_igmp6:
2936	remove_proc_entry("igmp6", net->proc_net);
2937	goto out;
2938}
2939
2940static void __net_exit igmp6_proc_exit(struct net *net)
2941{
2942	remove_proc_entry("mcfilter6", net->proc_net);
2943	remove_proc_entry("igmp6", net->proc_net);
2944}
2945#else
2946static inline int igmp6_proc_init(struct net *net)
2947{
2948	return 0;
2949}
2950static inline void igmp6_proc_exit(struct net *net)
2951{
2952}
2953#endif
2954
2955static int __net_init igmp6_net_init(struct net *net)
2956{
2957	int err;
2958
2959	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2960				   SOCK_RAW, IPPROTO_ICMPV6, net);
2961	if (err < 0) {
2962		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2963		       err);
2964		goto out;
2965	}
2966
2967	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
 
2968
2969	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
2970				   SOCK_RAW, IPPROTO_ICMPV6, net);
2971	if (err < 0) {
2972		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
2973		       err);
2974		goto out_sock_create;
2975	}
2976
2977	err = igmp6_proc_init(net);
2978	if (err)
2979		goto out_sock_create_autojoin;
2980
2981	return 0;
2982
2983out_sock_create_autojoin:
2984	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2985out_sock_create:
2986	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2987out:
2988	return err;
2989}
2990
2991static void __net_exit igmp6_net_exit(struct net *net)
2992{
2993	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2994	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
2995	igmp6_proc_exit(net);
2996}
2997
2998static struct pernet_operations igmp6_net_ops = {
2999	.init = igmp6_net_init,
3000	.exit = igmp6_net_exit,
3001};
3002
3003int __init igmp6_init(void)
3004{
3005	return register_pernet_subsys(&igmp6_net_ops);
3006}
3007
3008int __init igmp6_late_init(void)
3009{
3010	return register_netdevice_notifier(&igmp6_netdev_notifier);
3011}
3012
3013void igmp6_cleanup(void)
3014{
3015	unregister_pernet_subsys(&igmp6_net_ops);
 
3016}
3017
3018void igmp6_late_cleanup(void)
3019{
3020	unregister_netdevice_notifier(&igmp6_netdev_notifier);
3021}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Multicast support for IPv6
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
  10 */
  11
  12/* Changes:
  13 *
  14 *	yoshfuji	: fix format of router-alert option
  15 *	YOSHIFUJI Hideaki @USAGI:
  16 *		Fixed source address for MLD message based on
  17 *		<draft-ietf-magma-mld-source-05.txt>.
  18 *	YOSHIFUJI Hideaki @USAGI:
  19 *		- Ignore Queries for invalid addresses.
  20 *		- MLD for link-local addresses.
  21 *	David L Stevens <dlstevens@us.ibm.com>:
  22 *		- MLDv2 support
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/errno.h>
  27#include <linux/types.h>
  28#include <linux/string.h>
  29#include <linux/socket.h>
  30#include <linux/sockios.h>
  31#include <linux/jiffies.h>
 
  32#include <linux/net.h>
  33#include <linux/in.h>
  34#include <linux/in6.h>
  35#include <linux/netdevice.h>
  36#include <linux/if_arp.h>
  37#include <linux/route.h>
  38#include <linux/init.h>
  39#include <linux/proc_fs.h>
  40#include <linux/seq_file.h>
  41#include <linux/slab.h>
  42#include <linux/pkt_sched.h>
  43#include <net/mld.h>
  44#include <linux/workqueue.h>
  45
  46#include <linux/netfilter.h>
  47#include <linux/netfilter_ipv6.h>
  48
  49#include <net/net_namespace.h>
  50#include <net/sock.h>
  51#include <net/snmp.h>
  52
  53#include <net/ipv6.h>
  54#include <net/protocol.h>
  55#include <net/if_inet6.h>
  56#include <net/ndisc.h>
  57#include <net/addrconf.h>
  58#include <net/ip6_route.h>
  59#include <net/inet_common.h>
  60
  61#include <net/ip6_checksum.h>
  62
  63/* Ensure that we have struct in6_addr aligned on 32bit word. */
  64static int __mld2_query_bugs[] __attribute__((__unused__)) = {
  65	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
  66	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
  67	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
  68};
  69
  70static struct workqueue_struct *mld_wq;
  71static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
  72
  73static void igmp6_join_group(struct ifmcaddr6 *ma);
  74static void igmp6_leave_group(struct ifmcaddr6 *ma);
  75static void mld_mca_work(struct work_struct *work);
  76
 
 
  77static void mld_ifc_event(struct inet6_dev *idev);
 
 
 
  78static bool mld_in_v1_mode(const struct inet6_dev *idev);
  79static int sf_setstate(struct ifmcaddr6 *pmc);
  80static void sf_markstate(struct ifmcaddr6 *pmc);
  81static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
  82static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  83			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
  84			  int delta);
  85static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  86			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
  87			  int delta);
  88static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
  89			    struct inet6_dev *idev);
  90static int __ipv6_dev_mc_inc(struct net_device *dev,
  91			     const struct in6_addr *addr, unsigned int mode);
  92
  93#define MLD_QRV_DEFAULT		2
  94/* RFC3810, 9.2. Query Interval */
  95#define MLD_QI_DEFAULT		(125 * HZ)
  96/* RFC3810, 9.3. Query Response Interval */
  97#define MLD_QRI_DEFAULT		(10 * HZ)
  98
  99/* RFC3810, 8.1 Query Version Distinctions */
 100#define MLD_V1_QUERY_LEN	24
 101#define MLD_V2_QUERY_LEN_MIN	28
 102
 103#define IPV6_MLD_MAX_MSF	64
 104
 105int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
 106int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
 107
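
sysctl_mld_max_msf and sysctl_mld_qrv above are exposed through procfs as net.ipv6.mld_max_msf and net.ipv6.mld_qrv. A read-only userspace sketch (paths as registered by the IPv6 sysctl tables; no writes attempted):

#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/net/ipv6/mld_max_msf");
	show("/proc/sys/net/ipv6/mld_qrv");
	/* per-device knobs also exist, e.g.
	 * /proc/sys/net/ipv6/conf/eth0/mldv1_unsolicited_report_interval
	 * (device name assumed)
	 */
	return 0;
}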
 108/*
 109 *	socket join on multicast group
 110 */
 111#define mc_dereference(e, idev) \
 112	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
 113
 114#define sock_dereference(e, sk) \
 115	rcu_dereference_protected(e, lockdep_sock_is_held(sk))
 116
 117#define for_each_pmc_socklock(np, sk, pmc)			\
 118	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
 119	     pmc;						\
 120	     pmc = sock_dereference(pmc->next, sk))
 121
 122#define for_each_pmc_rcu(np, pmc)				\
 123	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
 124	     pmc;						\
 125	     pmc = rcu_dereference(pmc->next))
 126
 127#define for_each_psf_mclock(mc, psf)				\
 128	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
 129	     psf;						\
 130	     psf = mc_dereference(psf->sf_next, mc->idev))
 131
 132#define for_each_psf_rcu(mc, psf)				\
 133	for (psf = rcu_dereference((mc)->mca_sources);		\
 134	     psf;						\
 135	     psf = rcu_dereference(psf->sf_next))
 136
 137#define for_each_psf_tomb(mc, psf)				\
 138	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
 139	     psf;						\
 140	     psf = mc_dereference(psf->sf_next, mc->idev))
 141
 142#define for_each_mc_mclock(idev, mc)				\
 143	for (mc = mc_dereference((idev)->mc_list, idev);	\
 144	     mc;						\
 145	     mc = mc_dereference(mc->next, idev))
 146
 147#define for_each_mc_rcu(idev, mc)				\
 148	for (mc = rcu_dereference((idev)->mc_list);             \
 149	     mc;                                                \
 150	     mc = rcu_dereference(mc->next))
 151
 152#define for_each_mc_tomb(idev, mc)				\
 153	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
 154	     mc;						\
 155	     mc = mc_dereference(mc->next, idev))
 156
 157static int unsolicited_report_interval(struct inet6_dev *idev)
 158{
 159	int iv;
 160
 161	if (mld_in_v1_mode(idev))
 162		iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
 163	else
 164		iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
 165
 166	return iv > 0 ? iv : 1;
 167}
 168
 169static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
 170			       const struct in6_addr *addr, unsigned int mode)
 171{
 172	struct net_device *dev = NULL;
 173	struct ipv6_mc_socklist *mc_lst;
 174	struct ipv6_pinfo *np = inet6_sk(sk);
 175	struct net *net = sock_net(sk);
 176	int err;
 177
 178	ASSERT_RTNL();
 179
 180	if (!ipv6_addr_is_multicast(addr))
 181		return -EINVAL;
 182
 183	for_each_pmc_socklock(np, sk, mc_lst) {
 
 184		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 185		    ipv6_addr_equal(&mc_lst->addr, addr))
 
 186			return -EADDRINUSE;
 
 187	}
 
 188
 189	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 190
 191	if (!mc_lst)
 192		return -ENOMEM;
 193
 194	mc_lst->next = NULL;
 195	mc_lst->addr = *addr;
 196
 197	if (ifindex == 0) {
 198		struct rt6_info *rt;
 199		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
 200		if (rt) {
 201			dev = rt->dst.dev;
 202			ip6_rt_put(rt);
 203		}
 204	} else
 205		dev = __dev_get_by_index(net, ifindex);
 206
 207	if (!dev) {
 208		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 209		return -ENODEV;
 210	}
 211
 212	mc_lst->ifindex = dev->ifindex;
 213	mc_lst->sfmode = mode;
 214	RCU_INIT_POINTER(mc_lst->sflist, NULL);
 
 215
 216	/*
 217	 *	now add/increase the group membership on the device
 218	 */
 219
 220	err = __ipv6_dev_mc_inc(dev, addr, mode);
 221
 222	if (err) {
 223		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 224		return err;
 225	}
 226
 227	mc_lst->next = np->ipv6_mc_list;
 228	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
 229
 230	return 0;
 231}
 232
 233int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 234{
 235	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
 236}
 237EXPORT_SYMBOL(ipv6_sock_mc_join);
 238
 239int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
 240			  const struct in6_addr *addr, unsigned int mode)
 241{
 242	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
 243}
 244
 245/*
 246 *	socket leave on multicast group
 247 */
 248int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 249{
 250	struct ipv6_pinfo *np = inet6_sk(sk);
 251	struct ipv6_mc_socklist *mc_lst;
 252	struct ipv6_mc_socklist __rcu **lnk;
 253	struct net *net = sock_net(sk);
 254
 255	ASSERT_RTNL();
 256
 257	if (!ipv6_addr_is_multicast(addr))
 258		return -EINVAL;
 259
 260	for (lnk = &np->ipv6_mc_list;
 261	     (mc_lst = sock_dereference(*lnk, sk)) != NULL;
 262	      lnk = &mc_lst->next) {
 263		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 264		    ipv6_addr_equal(&mc_lst->addr, addr)) {
 265			struct net_device *dev;
 266
 267			*lnk = mc_lst->next;
 268
 269			dev = __dev_get_by_index(net, mc_lst->ifindex);
 270			if (dev) {
 271				struct inet6_dev *idev = __in6_dev_get(dev);
 272
 273				ip6_mc_leave_src(sk, mc_lst, idev);
 274				if (idev)
 275					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 276			} else {
 277				ip6_mc_leave_src(sk, mc_lst, NULL);
 278			}
 279
 280			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
 281			kfree_rcu(mc_lst, rcu);
 282			return 0;
 283		}
 284	}
 285
 286	return -EADDRNOTAVAIL;
 287}
 288EXPORT_SYMBOL(ipv6_sock_mc_drop);
 289
 290static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
 291					      const struct in6_addr *group,
 292					      int ifindex)
 
 293{
 294	struct net_device *dev = NULL;
 295	struct inet6_dev *idev = NULL;
 296
 297	if (ifindex == 0) {
 298		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
 299
 300		if (rt) {
 301			dev = rt->dst.dev;
 302			ip6_rt_put(rt);
 303		}
 304	} else {
 305		dev = __dev_get_by_index(net, ifindex);
 306	}
 307
 308	if (!dev)
 309		return NULL;
 310	idev = __in6_dev_get(dev);
 311	if (!idev)
 312		return NULL;
 313	if (idev->dead)
 
 
 314		return NULL;
 
 315	return idev;
 316}
 317
 318void __ipv6_sock_mc_close(struct sock *sk)
 319{
 320	struct ipv6_pinfo *np = inet6_sk(sk);
 321	struct ipv6_mc_socklist *mc_lst;
 322	struct net *net = sock_net(sk);
 323
 324	ASSERT_RTNL();
 325
 326	while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
 327		struct net_device *dev;
 328
 329		np->ipv6_mc_list = mc_lst->next;
 330
 331		dev = __dev_get_by_index(net, mc_lst->ifindex);
 332		if (dev) {
 333			struct inet6_dev *idev = __in6_dev_get(dev);
 334
 335			ip6_mc_leave_src(sk, mc_lst, idev);
 336			if (idev)
 337				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 338		} else {
 339			ip6_mc_leave_src(sk, mc_lst, NULL);
 340		}
 341
 342		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
 343		kfree_rcu(mc_lst, rcu);
 344	}
 345}
 346
 347void ipv6_sock_mc_close(struct sock *sk)
 348{
 349	struct ipv6_pinfo *np = inet6_sk(sk);
 350
 351	if (!rcu_access_pointer(np->ipv6_mc_list))
 352		return;
 353
 354	rtnl_lock();
 355	lock_sock(sk);
 356	__ipv6_sock_mc_close(sk);
 357	release_sock(sk);
 358	rtnl_unlock();
 359}
 360
 361int ip6_mc_source(int add, int omode, struct sock *sk,
 362	struct group_source_req *pgsr)
 363{
 364	struct in6_addr *source, *group;
 365	struct ipv6_mc_socklist *pmc;
 366	struct inet6_dev *idev;
 367	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 368	struct ip6_sf_socklist *psl;
 369	struct net *net = sock_net(sk);
 370	int i, j, rv;
 371	int leavegroup = 0;
 
 372	int err;
 373
 374	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
 375	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
 376
 377	if (!ipv6_addr_is_multicast(group))
 378		return -EINVAL;
 379
 380	idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
 381	if (!idev)
 
 
 382		return -ENODEV;
 
 383
 384	err = -EADDRNOTAVAIL;
 385
 386	mutex_lock(&idev->mc_lock);
 387	for_each_pmc_socklock(inet6, sk, pmc) {
 388		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 389			continue;
 390		if (ipv6_addr_equal(&pmc->addr, group))
 391			break;
 392	}
 393	if (!pmc) {		/* must have a prior join */
 394		err = -EINVAL;
 395		goto done;
 396	}
 397	/* if a source filter was set, must be the same mode as before */
 398	if (rcu_access_pointer(pmc->sflist)) {
 399		if (pmc->sfmode != omode) {
 400			err = -EINVAL;
 401			goto done;
 402		}
 403	} else if (pmc->sfmode != omode) {
 404		/* allow mode switches for empty-set filters */
 405		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
 406		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 407		pmc->sfmode = omode;
 408	}
 409
 410	psl = sock_dereference(pmc->sflist, sk);
 
 
 
 411	if (!add) {
 412		if (!psl)
 413			goto done;	/* err = -EADDRNOTAVAIL */
 414		rv = !0;
 415		for (i = 0; i < psl->sl_count; i++) {
 416			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
 417			if (rv == 0)
 418				break;
 419		}
 420		if (rv)		/* source not found */
 421			goto done;	/* err = -EADDRNOTAVAIL */
 422
 423		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
 424		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
 425			leavegroup = 1;
 426			goto done;
 427		}
 428
 429		/* update the interface filter */
 430		ip6_mc_del_src(idev, group, omode, 1, source, 1);
 431
 432		for (j = i+1; j < psl->sl_count; j++)
 433			psl->sl_addr[j-1] = psl->sl_addr[j];
 434		psl->sl_count--;
 435		err = 0;
 436		goto done;
 437	}
 438	/* else, add a new source to the filter */
 439
 440	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
 441		err = -ENOBUFS;
 442		goto done;
 443	}
 444	if (!psl || psl->sl_count == psl->sl_max) {
 445		struct ip6_sf_socklist *newpsl;
 446		int count = IP6_SFBLOCK;
 447
 448		if (psl)
 449			count += psl->sl_max;
 450		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
 451				      GFP_KERNEL);
 452		if (!newpsl) {
 453			err = -ENOBUFS;
 454			goto done;
 455		}
 456		newpsl->sl_max = count;
 457		newpsl->sl_count = count - IP6_SFBLOCK;
 458		if (psl) {
 459			for (i = 0; i < psl->sl_count; i++)
 460				newpsl->sl_addr[i] = psl->sl_addr[i];
 461			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
 462				   &sk->sk_omem_alloc);
 463		}
 464		rcu_assign_pointer(pmc->sflist, newpsl);
 465		kfree_rcu(psl, rcu);
 466		psl = newpsl;
 467	}
 468	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
 469	for (i = 0; i < psl->sl_count; i++) {
 470		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
  471		if (rv == 0) /* source already present - treat as an error */
 472			goto done;
 473	}
 474	for (j = psl->sl_count-1; j >= i; j--)
 475		psl->sl_addr[j+1] = psl->sl_addr[j];
 476	psl->sl_addr[i] = *source;
 477	psl->sl_count++;
 478	err = 0;
 479	/* update the interface list */
 480	ip6_mc_add_src(idev, group, omode, 1, source, 1);
 481done:
 482	mutex_unlock(&idev->mc_lock);
 
 
 
 483	if (leavegroup)
 484		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
 485	return err;
 486}
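
ip6_mc_source() also backs blocking individual senders on an any-source membership: MCAST_BLOCK_SOURCE arrives here with add set and omode == MCAST_EXCLUDE. A userspace sketch, with the interface, group and blocked source assumed for illustration; the prior any-source join is required, otherwise the "must have a prior join" check above returns -EINVAL.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>

int main(void)
{
	struct ipv6_mreq mreq;
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* any-source join first (EXCLUDE mode with an empty source list) */
	memset(&mreq, 0, sizeof(mreq));
	mreq.ipv6mr_interface = if_nametoindex("eth0");	/* assumed */
	inet_pton(AF_INET6, "ff05::1234", &mreq.ipv6mr_multiaddr);
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq)) < 0)
		perror("IPV6_JOIN_GROUP");

	/* now exclude one sender; lands in ip6_mc_source() with MCAST_EXCLUDE */
	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = mreq.ipv6mr_interface;
	grp->sin6_family = src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff05::1234", &grp->sin6_addr);
	inet_pton(AF_INET6, "2001:db8::bad", &src->sin6_addr);
	if (setsockopt(fd, IPPROTO_IPV6, MCAST_BLOCK_SOURCE, &gsr, sizeof(gsr)) < 0)
		perror("MCAST_BLOCK_SOURCE");
	return 0;
}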
 487
 488int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
 489		    struct sockaddr_storage *list)
 490{
 491	const struct in6_addr *group;
 492	struct ipv6_mc_socklist *pmc;
 493	struct inet6_dev *idev;
 494	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 495	struct ip6_sf_socklist *newpsl, *psl;
 496	struct net *net = sock_net(sk);
 497	int leavegroup = 0;
 498	int i, err;
 499
 500	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 501
 502	if (!ipv6_addr_is_multicast(group))
 503		return -EINVAL;
 504	if (gsf->gf_fmode != MCAST_INCLUDE &&
 505	    gsf->gf_fmode != MCAST_EXCLUDE)
 506		return -EINVAL;
 507
 508	idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
 509	if (!idev)
 
 
 
 510		return -ENODEV;
 
 511
 512	err = 0;
 513
 514	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 515		leavegroup = 1;
 516		goto done;
 517	}
 518
 519	for_each_pmc_socklock(inet6, sk, pmc) {
 520		if (pmc->ifindex != gsf->gf_interface)
 521			continue;
 522		if (ipv6_addr_equal(&pmc->addr, group))
 523			break;
 524	}
 525	if (!pmc) {		/* must have a prior join */
 526		err = -EINVAL;
 527		goto done;
 528	}
 529	if (gsf->gf_numsrc) {
 530		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
 531						      gsf->gf_numsrc),
 532				      GFP_KERNEL);
 533		if (!newpsl) {
 534			err = -ENOBUFS;
 535			goto done;
 536		}
 537		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
 538		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
 539			struct sockaddr_in6 *psin6;
 540
 541			psin6 = (struct sockaddr_in6 *)list;
 542			newpsl->sl_addr[i] = psin6->sin6_addr;
 543		}
 544		mutex_lock(&idev->mc_lock);
 545		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
 546				     newpsl->sl_count, newpsl->sl_addr, 0);
 547		if (err) {
 548			mutex_unlock(&idev->mc_lock);
 549			sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
 550							     newpsl->sl_max));
 551			goto done;
 552		}
 553		mutex_unlock(&idev->mc_lock);
 554	} else {
 555		newpsl = NULL;
 556		mutex_lock(&idev->mc_lock);
 557		ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
 558		mutex_unlock(&idev->mc_lock);
 559	}
 560
 561	mutex_lock(&idev->mc_lock);
 562	psl = sock_dereference(pmc->sflist, sk);
 563	if (psl) {
 564		ip6_mc_del_src(idev, group, pmc->sfmode,
 565			       psl->sl_count, psl->sl_addr, 0);
 566		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
 567			   &sk->sk_omem_alloc);
 568	} else {
 569		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 570	}
 571	rcu_assign_pointer(pmc->sflist, newpsl);
 572	mutex_unlock(&idev->mc_lock);
 573	kfree_rcu(psl, rcu);
 574	pmc->sfmode = gsf->gf_fmode;
 
 575	err = 0;
 576done:
 
 
 577	if (leavegroup)
 578		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 579	return err;
 580}
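
ip6_mc_msfilter() is the MCAST_MSFILTER set path. A hedged sketch of its "(INCLUDE, empty)" shortcut, which is treated as a leave just like the special case noted in ip6_mc_source(); the interface and group are assumptions, and a prior join is done first so the call has a membership to act on.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>

int main(void)
{
	struct ipv6_mreq mreq;
	struct group_filter gf;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gf.gf_group;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&mreq, 0, sizeof(mreq));
	mreq.ipv6mr_interface = if_nametoindex("eth0");	/* assumed */
	inet_pton(AF_INET6, "ff05::77", &mreq.ipv6mr_multiaddr);
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq)) < 0)
		perror("IPV6_JOIN_GROUP");

	/* (INCLUDE, empty): ip6_mc_msfilter() sets leavegroup and falls
	 * through to ipv6_sock_mc_drop(), i.e. this is equivalent to a leave
	 */
	memset(&gf, 0, sizeof(gf));
	gf.gf_interface = mreq.ipv6mr_interface;
	gf.gf_fmode = MCAST_INCLUDE;
	gf.gf_numsrc = 0;
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff05::77", &grp->sin6_addr);
	if (setsockopt(fd, IPPROTO_IPV6, MCAST_MSFILTER, &gf, sizeof(gf)) < 0)
		perror("MCAST_MSFILTER");
	return 0;
}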
 581
 582int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 583		  sockptr_t optval, size_t ss_offset)
 584{
 585	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 586	const struct in6_addr *group;
 587	struct ipv6_mc_socklist *pmc;
 
 
 588	struct ip6_sf_socklist *psl;
 589	unsigned int count;
 590	int i, copycount;
 591
 592	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 593
 594	if (!ipv6_addr_is_multicast(group))
 595		return -EINVAL;
 597	/* changes to the ipv6_mc_list require the socket lock and
 598	 * rtnl lock. We have the socket lock, so reading the list is safe.
 
 599	 */
 600
 601	for_each_pmc_socklock(inet6, sk, pmc) {
 602		if (pmc->ifindex != gsf->gf_interface)
 603			continue;
 604		if (ipv6_addr_equal(group, &pmc->addr))
 605			break;
 606	}
 607	if (!pmc)		/* must have a prior join */
 608		return -EADDRNOTAVAIL;
 609
 610	gsf->gf_fmode = pmc->sfmode;
 611	psl = sock_dereference(pmc->sflist, sk);
 612	count = psl ? psl->sl_count : 0;
 
 
 613
 614	copycount = min(count, gsf->gf_numsrc);
 615	gsf->gf_numsrc = count;
 616	for (i = 0; i < copycount; i++) {
 
 
 
 617		struct sockaddr_in6 *psin6;
 618		struct sockaddr_storage ss;
 619
 620		psin6 = (struct sockaddr_in6 *)&ss;
 621		memset(&ss, 0, sizeof(ss));
 622		psin6->sin6_family = AF_INET6;
 623		psin6->sin6_addr = psl->sl_addr[i];
 624		if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
 625			return -EFAULT;
 626		ss_offset += sizeof(ss);
 627	}
 628	return 0;
 629}
 630
 631bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
 632		    const struct in6_addr *src_addr)
 633{
 634	const struct ipv6_pinfo *np = inet6_sk(sk);
 635	const struct ipv6_mc_socklist *mc;
 636	const struct ip6_sf_socklist *psl;
 637	bool rv = true;
 638
 639	rcu_read_lock();
 640	for_each_pmc_rcu(np, mc) {
 641		if (ipv6_addr_equal(&mc->addr, mc_addr))
 642			break;
 643	}
 644	if (!mc) {
 645		rcu_read_unlock();
 646		return inet6_test_bit(MC6_ALL, sk);
 647	}
 648	psl = rcu_dereference(mc->sflist);
 
 649	if (!psl) {
 650		rv = mc->sfmode == MCAST_EXCLUDE;
 651	} else {
 652		int i;
 653
 654		for (i = 0; i < psl->sl_count; i++) {
 655			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
 656				break;
 657		}
 658		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
 659			rv = false;
 660		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
 661			rv = false;
 662	}
 
 663	rcu_read_unlock();
 664
 665	return rv;
 666}
 667
 668/* called with mc_lock */
 669static void igmp6_group_added(struct ifmcaddr6 *mc)
 670{
 671	struct net_device *dev = mc->idev->dev;
 672	char buf[MAX_ADDR_LEN];
 673
 674	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
 675	    IPV6_ADDR_SCOPE_LINKLOCAL)
 676		return;
 677
 
 678	if (!(mc->mca_flags&MAF_LOADED)) {
 679		mc->mca_flags |= MAF_LOADED;
 680		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 681			dev_mc_add(dev, buf);
 682	}
 
 683
 684	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
 685		return;
 686
 687	if (mld_in_v1_mode(mc->idev)) {
 688		igmp6_join_group(mc);
 689		return;
 690	}
 691	/* else v2 */
 692
  693	/* Based on RFC 3810 6.1, for a newly added INCLUDE (SSM) group we
  694	 * should not send a filter-mode change record, since the transition
  695	 * is from IN() to IN(A).
  696	 */
 697	if (mc->mca_sfmode == MCAST_EXCLUDE)
 698		mc->mca_crcount = mc->idev->mc_qrv;
 699
 700	mld_ifc_event(mc->idev);
 701}
 702
 703/* called with mc_lock */
 704static void igmp6_group_dropped(struct ifmcaddr6 *mc)
 705{
 706	struct net_device *dev = mc->idev->dev;
 707	char buf[MAX_ADDR_LEN];
 708
 709	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
 710	    IPV6_ADDR_SCOPE_LINKLOCAL)
 711		return;
 712
 
 713	if (mc->mca_flags&MAF_LOADED) {
 714		mc->mca_flags &= ~MAF_LOADED;
 715		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 716			dev_mc_del(dev, buf);
 717	}
 718
 
 719	if (mc->mca_flags & MAF_NOREPORT)
 720		return;
 721
 722	if (!mc->idev->dead)
 723		igmp6_leave_group(mc);
 724
 725	if (cancel_delayed_work(&mc->mca_work))
 
 726		refcount_dec(&mc->mca_refcnt);
 
 727}
 728
 729/*
 730 * deleted ifmcaddr6 manipulation
 731 * called with mc_lock
 732 */
 733static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 734{
 735	struct ifmcaddr6 *pmc;
 736
 737	/* this is an "ifmcaddr6" for convenience; only the fields below
 738	 * are actually used. In particular, the refcnt and users are not
 739	 * used for management of the delete list. Using the same structure
 740	 * for deleted items allows change reports to use common code with
 741	 * non-deleted or query-response MCA's.
 742	 */
 743	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 744	if (!pmc)
 745		return;
 746
 
 
 747	pmc->idev = im->idev;
 748	in6_dev_hold(idev);
 749	pmc->mca_addr = im->mca_addr;
 750	pmc->mca_crcount = idev->mc_qrv;
 751	pmc->mca_sfmode = im->mca_sfmode;
 752	if (pmc->mca_sfmode == MCAST_INCLUDE) {
 753		struct ip6_sf_list *psf;
 754
 755		rcu_assign_pointer(pmc->mca_tomb,
 756				   mc_dereference(im->mca_tomb, idev));
 757		rcu_assign_pointer(pmc->mca_sources,
 758				   mc_dereference(im->mca_sources, idev));
 759		RCU_INIT_POINTER(im->mca_tomb, NULL);
 760		RCU_INIT_POINTER(im->mca_sources, NULL);
 761
 762		for_each_psf_mclock(pmc, psf)
 763			psf->sf_crcount = pmc->mca_crcount;
 764	}
 
 765
 766	rcu_assign_pointer(pmc->next, idev->mc_tomb);
 767	rcu_assign_pointer(idev->mc_tomb, pmc);
 
 
 768}
 769
 770/* called with mc_lock */
 771static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 772{
 773	struct ip6_sf_list *psf, *sources, *tomb;
 
 774	struct in6_addr *pmca = &im->mca_addr;
 775	struct ifmcaddr6 *pmc, *pmc_prev;
 776
 
 777	pmc_prev = NULL;
 778	for_each_mc_tomb(idev, pmc) {
 779		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
 780			break;
 781		pmc_prev = pmc;
 782	}
 783	if (pmc) {
 784		if (pmc_prev)
 785			rcu_assign_pointer(pmc_prev->next, pmc->next);
 786		else
 787			rcu_assign_pointer(idev->mc_tomb, pmc->next);
 788	}
 
 789
 
 790	if (pmc) {
 791		im->idev = pmc->idev;
 792		if (im->mca_sfmode == MCAST_INCLUDE) {
 793			tomb = rcu_replace_pointer(im->mca_tomb,
 794						   mc_dereference(pmc->mca_tomb, pmc->idev),
 795						   lockdep_is_held(&im->idev->mc_lock));
 796			rcu_assign_pointer(pmc->mca_tomb, tomb);
 797
 798			sources = rcu_replace_pointer(im->mca_sources,
 799						      mc_dereference(pmc->mca_sources, pmc->idev),
 800						      lockdep_is_held(&im->idev->mc_lock));
 801			rcu_assign_pointer(pmc->mca_sources, sources);
 802			for_each_psf_mclock(im, psf)
 803				psf->sf_crcount = idev->mc_qrv;
 804		} else {
 805			im->mca_crcount = idev->mc_qrv;
 806		}
 807		in6_dev_put(pmc->idev);
 808		ip6_mc_clear_src(pmc);
 809		kfree_rcu(pmc, rcu);
 810	}
 
 811}
 812
 813/* called with mc_lock */
 814static void mld_clear_delrec(struct inet6_dev *idev)
 815{
 816	struct ifmcaddr6 *pmc, *nextpmc;
 817
 818	pmc = mc_dereference(idev->mc_tomb, idev);
 819	RCU_INIT_POINTER(idev->mc_tomb, NULL);
 
 
 820
 821	for (; pmc; pmc = nextpmc) {
 822		nextpmc = mc_dereference(pmc->next, idev);
 823		ip6_mc_clear_src(pmc);
 824		in6_dev_put(pmc->idev);
 825		kfree_rcu(pmc, rcu);
 826	}
 827
 828	/* clear dead sources, too */
 829	for_each_mc_mclock(idev, pmc) {
 
 830		struct ip6_sf_list *psf, *psf_next;
 831
 832		psf = mc_dereference(pmc->mca_tomb, idev);
 833		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
 
 
 834		for (; psf; psf = psf_next) {
 835			psf_next = mc_dereference(psf->sf_next, idev);
 836			kfree_rcu(psf, rcu);
 837		}
 838	}
 839}
 840
 841static void mld_clear_query(struct inet6_dev *idev)
 842{
 843	struct sk_buff *skb;
 844
 845	spin_lock_bh(&idev->mc_query_lock);
 846	while ((skb = __skb_dequeue(&idev->mc_query_queue)))
 847		kfree_skb(skb);
 848	spin_unlock_bh(&idev->mc_query_lock);
 849}
 850
 851static void mld_clear_report(struct inet6_dev *idev)
 852{
 853	struct sk_buff *skb;
 854
 855	spin_lock_bh(&idev->mc_report_lock);
 856	while ((skb = __skb_dequeue(&idev->mc_report_queue)))
 857		kfree_skb(skb);
 858	spin_unlock_bh(&idev->mc_report_lock);
 859}
 860
 861static void mca_get(struct ifmcaddr6 *mc)
 862{
 863	refcount_inc(&mc->mca_refcnt);
 864}
 865
 866static void ma_put(struct ifmcaddr6 *mc)
 867{
 868	if (refcount_dec_and_test(&mc->mca_refcnt)) {
 869		in6_dev_put(mc->idev);
 870		kfree_rcu(mc, rcu);
 871	}
 872}
 873
 874/* called with mc_lock */
 875static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 876				   const struct in6_addr *addr,
 877				   unsigned int mode)
 878{
 879	struct ifmcaddr6 *mc;
 880
 881	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 882	if (!mc)
 883		return NULL;
 884
 885	INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);
 886
 887	mc->mca_addr = *addr;
 888	mc->idev = idev; /* reference taken by caller */
 889	mc->mca_users = 1;
 890	/* mca_stamp should be updated upon changes */
 891	mc->mca_cstamp = mc->mca_tstamp = jiffies;
 892	refcount_set(&mc->mca_refcnt, 1);
 
 893
 894	mc->mca_sfmode = mode;
 895	mc->mca_sfcount[mode] = 1;
 896
 897	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
 898	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
 899		mc->mca_flags |= MAF_NOREPORT;
 900
 901	return mc;
 902}
 903
 904/*
 905 *	device multicast group inc (add if not found)
 906 */
 907static int __ipv6_dev_mc_inc(struct net_device *dev,
 908			     const struct in6_addr *addr, unsigned int mode)
 909{
 910	struct ifmcaddr6 *mc;
 911	struct inet6_dev *idev;
 912
 913	ASSERT_RTNL();
 914
 915	/* we need to take a reference on idev */
 916	idev = in6_dev_get(dev);
 917
 918	if (!idev)
 919		return -EINVAL;
 920
 
 921	if (idev->dead) {
 
 922		in6_dev_put(idev);
 923		return -ENODEV;
 924	}
 925
 926	mutex_lock(&idev->mc_lock);
 927	for_each_mc_mclock(idev, mc) {
 928		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
 929			mc->mca_users++;
 
 930			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
 931			mutex_unlock(&idev->mc_lock);
 932			in6_dev_put(idev);
 933			return 0;
 934		}
 935	}
 936
 937	mc = mca_alloc(idev, addr, mode);
 938	if (!mc) {
 939		mutex_unlock(&idev->mc_lock);
 940		in6_dev_put(idev);
 941		return -ENOMEM;
 942	}
 943
 944	rcu_assign_pointer(mc->next, idev->mc_list);
 945	rcu_assign_pointer(idev->mc_list, mc);
 946
 
 
 
 947	mca_get(mc);
 
 948
 949	mld_del_delrec(idev, mc);
 950	igmp6_group_added(mc);
 951	mutex_unlock(&idev->mc_lock);
 952	ma_put(mc);
 953	return 0;
 954}
 955
 956int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 957{
 958	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
 959}
 960EXPORT_SYMBOL(ipv6_dev_mc_inc);
 961
 962/*
 963 * device multicast group del
 964 */
 965int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 966{
 967	struct ifmcaddr6 *ma, __rcu **map;
 968
 969	ASSERT_RTNL();
 970
 971	mutex_lock(&idev->mc_lock);
 972	for (map = &idev->mc_list;
 973	     (ma = mc_dereference(*map, idev));
 974	     map = &ma->next) {
 975		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
 976			if (--ma->mca_users == 0) {
 977				*map = ma->next;
 978
 979				igmp6_group_dropped(ma);
 980				ip6_mc_clear_src(ma);
 981				mutex_unlock(&idev->mc_lock);
 982
 983				ma_put(ma);
 984				return 0;
 985			}
 986			mutex_unlock(&idev->mc_lock);
 987			return 0;
 988		}
 989	}
 990
 991	mutex_unlock(&idev->mc_lock);
 992	return -ENOENT;
 993}
 994
 995int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
 996{
 997	struct inet6_dev *idev;
 998	int err;
 999
1000	ASSERT_RTNL();
1001
1002	idev = __in6_dev_get(dev);
1003	if (!idev)
1004		err = -ENODEV;
1005	else
1006		err = __ipv6_dev_mc_dec(idev, addr);
1007
1008	return err;
1009}
1010EXPORT_SYMBOL(ipv6_dev_mc_dec);
1011
1012/*
1013 *	check if the interface/address pair is valid
1014 */
1015bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1016			 const struct in6_addr *src_addr)
1017{
1018	struct inet6_dev *idev;
1019	struct ifmcaddr6 *mc;
1020	bool rv = false;
1021
1022	rcu_read_lock();
1023	idev = __in6_dev_get(dev);
1024	if (idev) {
1025		for_each_mc_rcu(idev, mc) {
1026			if (ipv6_addr_equal(&mc->mca_addr, group))
1027				break;
1028		}
1029		if (mc) {
1030			if (src_addr && !ipv6_addr_any(src_addr)) {
1031				struct ip6_sf_list *psf;
1032
1033				for_each_psf_rcu(mc, psf) {
1034					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1035						break;
1036				}
1037				if (psf)
1038					rv = psf->sf_count[MCAST_INCLUDE] ||
1039						psf->sf_count[MCAST_EXCLUDE] !=
1040						mc->mca_sfcount[MCAST_EXCLUDE];
1041				else
1042					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1043			} else
1044				rv = true; /* don't filter unspecified source */
1045		}
1046	}
1047	rcu_read_unlock();
1048	return rv;
1049}
1050
1051/* called with mc_lock */
1052static void mld_gq_start_work(struct inet6_dev *idev)
1053{
1054	unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1055
1056	idev->mc_gq_running = 1;
1057	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1058		in6_dev_hold(idev);
1059}
1060
1061/* called with mc_lock */
1062static void mld_gq_stop_work(struct inet6_dev *idev)
1063{
1064	idev->mc_gq_running = 0;
1065	if (cancel_delayed_work(&idev->mc_gq_work))
1066		__in6_dev_put(idev);
1067}
1068
1069/* called with mc_lock */
1070static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1071{
1072	unsigned long tv = get_random_u32_below(delay);
1073
1074	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1075		in6_dev_hold(idev);
1076}
1077
1078/* called with mc_lock */
1079static void mld_ifc_stop_work(struct inet6_dev *idev)
1080{
1081	idev->mc_ifc_count = 0;
1082	if (cancel_delayed_work(&idev->mc_ifc_work))
1083		__in6_dev_put(idev);
1084}
1085
1086/* called with mc_lock */
1087static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1088{
1089	unsigned long tv = get_random_u32_below(delay);
1090
1091	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1092		in6_dev_hold(idev);
1093}
1094
1095static void mld_dad_stop_work(struct inet6_dev *idev)
1096{
1097	if (cancel_delayed_work(&idev->mc_dad_work))
1098		__in6_dev_put(idev);
1099}
1100
1101static void mld_query_stop_work(struct inet6_dev *idev)
1102{
1103	spin_lock_bh(&idev->mc_query_lock);
1104	if (cancel_delayed_work(&idev->mc_query_work))
1105		__in6_dev_put(idev);
1106	spin_unlock_bh(&idev->mc_query_lock);
1107}
1108
1109static void mld_report_stop_work(struct inet6_dev *idev)
1110{
1111	if (cancel_delayed_work_sync(&idev->mc_report_work))
1112		__in6_dev_put(idev);
1113}
1114
1115/*
1116 * IGMP handling (alias multicast ICMPv6 messages)
1117 * called with mc_lock
1118 */
1119static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1120{
1121	unsigned long delay = resptime;
1122
1123	/* Do not start work for these addresses */
1124	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1125	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1126		return;
1127
1128	if (cancel_delayed_work(&ma->mca_work)) {
1129		refcount_dec(&ma->mca_refcnt);
1130		delay = ma->mca_work.timer.expires - jiffies;
1131	}
1132
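	/* Respond at a random time within the query's maximum response
	 * delay, unless a report for this group is already scheduled
	 * to run sooner.
	 */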
1133	if (delay >= resptime)
1134		delay = get_random_u32_below(resptime);
1135
1136	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
1137		refcount_inc(&ma->mca_refcnt);
1138	ma->mca_flags |= MAF_TIMER_RUNNING;
1139}
1140
1141/* mark EXCLUDE-mode sources
1142 * called with mc_lock
1143 */
1144static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1145			     const struct in6_addr *srcs)
1146{
1147	struct ip6_sf_list *psf;
1148	int i, scount;
1149
1150	scount = 0;
1151	for_each_psf_mclock(pmc, psf) {
1152		if (scount == nsrcs)
1153			break;
1154		for (i = 0; i < nsrcs; i++) {
1155			/* skip inactive filters */
1156			if (psf->sf_count[MCAST_INCLUDE] ||
1157			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
1158			    psf->sf_count[MCAST_EXCLUDE])
1159				break;
1160			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1161				scount++;
1162				break;
1163			}
1164		}
1165	}
1166	pmc->mca_flags &= ~MAF_GSQUERY;
1167	if (scount == nsrcs)	/* all sources excluded */
1168		return false;
1169	return true;
1170}
1171
1172/* called with mc_lock */
1173static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1174			    const struct in6_addr *srcs)
1175{
1176	struct ip6_sf_list *psf;
1177	int i, scount;
1178
1179	if (pmc->mca_sfmode == MCAST_EXCLUDE)
1180		return mld_xmarksources(pmc, nsrcs, srcs);
1181
1182	/* mark INCLUDE-mode sources */
1183
1184	scount = 0;
1185	for_each_psf_mclock(pmc, psf) {
1186		if (scount == nsrcs)
1187			break;
1188		for (i = 0; i < nsrcs; i++) {
1189			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1190				psf->sf_gsresp = 1;
1191				scount++;
1192				break;
1193			}
1194		}
1195	}
1196	if (!scount) {
1197		pmc->mca_flags &= ~MAF_GSQUERY;
1198		return false;
1199	}
1200	pmc->mca_flags |= MAF_GSQUERY;
1201	return true;
1202}
1203
1204static int mld_force_mld_version(const struct inet6_dev *idev)
1205{
1206	const struct net *net = dev_net(idev->dev);
1207	int all_force;
1208
1209	all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
 1210	/* Normally, both are 0 here. If enforcement of a particular version
 1211	 * is in use, the per-device setting takes lower precedence than the
 1212	 * 'all' device setting (.../conf/all/force_mld_version).
 1213	 */
1214	return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
1215}
1216
1217static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1218{
1219	return mld_force_mld_version(idev) == 2;
1220}
1221
1222static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1223{
1224	return mld_force_mld_version(idev) == 1;
1225}
1226
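/* MLDv1 compatibility mode: a forced MLDv2 setting wins, then a forced
 * MLDv1 setting, then dynamic fallback while an MLDv1 querier has been
 * seen recently (mc_v1_seen has not yet expired).
 */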
1227static bool mld_in_v1_mode(const struct inet6_dev *idev)
1228{
1229	if (mld_in_v2_mode_only(idev))
1230		return false;
1231	if (mld_in_v1_mode_only(idev))
1232		return true;
1233	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1234		return true;
1235
1236	return false;
1237}
1238
1239static void mld_set_v1_mode(struct inet6_dev *idev)
1240{
1241	/* RFC3810, relevant sections:
1242	 *  - 9.1. Robustness Variable
1243	 *  - 9.2. Query Interval
1244	 *  - 9.3. Query Response Interval
1245	 *  - 9.12. Older Version Querier Present Timeout
1246	 */
1247	unsigned long switchback;
1248
1249	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1250
1251	idev->mc_v1_seen = jiffies + switchback;
1252}
1253
1254static void mld_update_qrv(struct inet6_dev *idev,
1255			   const struct mld2_query *mlh2)
1256{
1257	/* RFC3810, relevant sections:
1258	 *  - 5.1.8. QRV (Querier's Robustness Variable)
1259	 *  - 9.1. Robustness Variable
1260	 */
1261
1262	/* The value of the Robustness Variable MUST NOT be zero,
1263	 * and SHOULD NOT be one. Catch this here if we ever run
1264	 * into such a case in future.
1265	 */
1266	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1267	WARN_ON(idev->mc_qrv == 0);
1268
1269	if (mlh2->mld2q_qrv > 0)
1270		idev->mc_qrv = mlh2->mld2q_qrv;
1271
1272	if (unlikely(idev->mc_qrv < min_qrv)) {
1273		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1274				     idev->mc_qrv, min_qrv);
1275		idev->mc_qrv = min_qrv;
1276	}
1277}
1278
1279static void mld_update_qi(struct inet6_dev *idev,
1280			  const struct mld2_query *mlh2)
1281{
1282	/* RFC3810, relevant sections:
1283	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
1284	 *  - 9.2. Query Interval
1285	 *  - 9.12. Older Version Querier Present Timeout
1286	 *    (the [Query Interval] in the last Query received)
1287	 */
1288	unsigned long mc_qqi;
1289
1290	if (mlh2->mld2q_qqic < 128) {
1291		mc_qqi = mlh2->mld2q_qqic;
1292	} else {
1293		unsigned long mc_man, mc_exp;
1294
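		/* QQIC >= 128 is a floating-point encoding (RFC 3810, 5.1.9):
		 * exponent in bits 4-6, mantissa in bits 0-3,
		 * QQI = (mantissa | 0x10) << (exponent + 3) seconds.
		 */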
1295		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1296		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1297
1298		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1299	}
1300
1301	idev->mc_qi = mc_qqi * HZ;
1302}
1303
1304static void mld_update_qri(struct inet6_dev *idev,
1305			   const struct mld2_query *mlh2)
1306{
1307	/* RFC3810, relevant sections:
1308	 *  - 5.1.3. Maximum Response Code
1309	 *  - 9.3. Query Response Interval
1310	 */
1311	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1312}
1313
1314static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1315			  unsigned long *max_delay, bool v1_query)
1316{
1317	unsigned long mldv1_md;
1318
1319	/* Ignore v1 queries */
1320	if (mld_in_v2_mode_only(idev))
1321		return -EINVAL;
1322
1323	mldv1_md = ntohs(mld->mld_maxdelay);
1324
 1325	/* When we are in MLDv1 fallback and an MLDv2 router starts up
 1326	 * unaware of the current MLDv1 operation, the MRC == MRD mapping
 1327	 * only works as long as the exponential algorithm is not in
 1328	 * use (MLDv1 knows nothing about it).
 1329	 *
 1330	 * According to the RFC author, the MLDv2 implementations
 1331	 * he is aware of all use an MRC < 32768 on start-up queries.
1332	 *
1333	 * Thus, should we *ever* encounter something else larger
1334	 * than that, just assume the maximum possible within our
1335	 * reach.
1336	 */
1337	if (!v1_query)
1338		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1339
1340	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1341
1342	/* MLDv1 router present: we need to go into v1 mode *only*
1343	 * when an MLDv1 query is received as per section 9.12. of
1344	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1345	 * queries MUST be of exactly 24 octets.
1346	 */
1347	if (v1_query)
1348		mld_set_v1_mode(idev);
1349
1350	/* cancel MLDv2 report work */
1351	mld_gq_stop_work(idev);
1352	/* cancel the interface change work */
1353	mld_ifc_stop_work(idev);
1354	/* clear deleted report items */
1355	mld_clear_delrec(idev);
1356
1357	return 0;
1358}
1359
1360static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1361			   unsigned long *max_delay)
1362{
1363	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1364
1365	mld_update_qrv(idev, mld);
1366	mld_update_qi(idev, mld);
1367	mld_update_qri(idev, mld);
1368
1369	idev->mc_maxdelay = *max_delay;
1370
1371	return;
1372}
1373
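/* Receive path for MLD queries: only queue the skb here under the
 * BH-safe mc_query_lock; the actual processing happens later in
 * mld_query_work(), where the sleeping mc_lock mutex can be taken.
 */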
1374/* called with rcu_read_lock() */
1375void igmp6_event_query(struct sk_buff *skb)
1376{
1377	struct inet6_dev *idev = __in6_dev_get(skb->dev);
1378
1379	if (!idev || idev->dead)
1380		goto out;
1381
1382	spin_lock_bh(&idev->mc_query_lock);
1383	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1384		__skb_queue_tail(&idev->mc_query_queue, skb);
1385		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1386			in6_dev_hold(idev);
1387		skb = NULL;
1388	}
1389	spin_unlock_bh(&idev->mc_query_lock);
1390out:
1391	kfree_skb(skb);
1392}
1393
1394static void __mld_query_work(struct sk_buff *skb)
1395{
1396	struct mld2_query *mlh2 = NULL;
1397	const struct in6_addr *group;
1398	unsigned long max_delay;
1399	struct inet6_dev *idev;
1400	struct ifmcaddr6 *ma;
1401	struct mld_msg *mld;
1402	int group_type;
1403	int mark = 0;
1404	int len, err;
1405
1406	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1407		goto kfree_skb;
1408
1409	/* compute payload length excluding extension headers */
1410	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1411	len -= skb_network_header_len(skb);
1412
1413	/* RFC3810 6.2
1414	 * Upon reception of an MLD message that contains a Query, the node
1415	 * checks if the source address of the message is a valid link-local
1416	 * address, if the Hop Limit is set to 1, and if the Router Alert
1417	 * option is present in the Hop-By-Hop Options header of the IPv6
1418	 * packet.  If any of these checks fails, the packet is dropped.
1419	 */
1420	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1421	    ipv6_hdr(skb)->hop_limit != 1 ||
1422	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1423	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1424		goto kfree_skb;
1425
1426	idev = in6_dev_get(skb->dev);
1427	if (!idev)
1428		goto kfree_skb;
1429
1430	mld = (struct mld_msg *)icmp6_hdr(skb);
1431	group = &mld->mld_mca;
1432	group_type = ipv6_addr_type(group);
1433
1434	if (group_type != IPV6_ADDR_ANY &&
1435	    !(group_type&IPV6_ADDR_MULTICAST))
1436		goto out;
1437
1438	if (len < MLD_V1_QUERY_LEN) {
1439		goto out;
1440	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1441		err = mld_process_v1(idev, mld, &max_delay,
1442				     len == MLD_V1_QUERY_LEN);
1443		if (err < 0)
1444			goto out;
1445	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
1446		int srcs_offset = sizeof(struct mld2_query) -
1447				  sizeof(struct icmp6hdr);
1448
1449		if (!pskb_may_pull(skb, srcs_offset))
1450			goto out;
1451
1452		mlh2 = (struct mld2_query *)skb_transport_header(skb);
1453
1454		mld_process_v2(idev, mlh2, &max_delay);
1455
1456		if (group_type == IPV6_ADDR_ANY) { /* general query */
1457			if (mlh2->mld2q_nsrcs)
1458				goto out; /* no sources allowed */
1459
1460			mld_gq_start_work(idev);
1461			goto out;
1462		}
1463		/* mark sources to include, if group & source-specific */
1464		if (mlh2->mld2q_nsrcs != 0) {
1465			if (!pskb_may_pull(skb, srcs_offset +
1466			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1467				goto out;
1468
1469			mlh2 = (struct mld2_query *)skb_transport_header(skb);
1470			mark = 1;
1471		}
1472	} else {
1473		goto out;
1474	}
1475
1476	if (group_type == IPV6_ADDR_ANY) {
1477		for_each_mc_mclock(idev, ma) {
1478			igmp6_group_queried(ma, max_delay);
1479		}
1480	} else {
1481		for_each_mc_mclock(idev, ma) {
1482			if (!ipv6_addr_equal(group, &ma->mca_addr))
1483				continue;
1484			if (ma->mca_flags & MAF_TIMER_RUNNING) {
1485				/* gsquery <- gsquery && mark */
1486				if (!mark)
1487					ma->mca_flags &= ~MAF_GSQUERY;
1488			} else {
1489				/* gsquery <- mark */
1490				if (mark)
1491					ma->mca_flags |= MAF_GSQUERY;
1492				else
1493					ma->mca_flags &= ~MAF_GSQUERY;
1494			}
1495			if (!(ma->mca_flags & MAF_GSQUERY) ||
1496			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1497				igmp6_group_queried(ma, max_delay);
1498			break;
1499		}
1500	}
1501
1502out:
1503	in6_dev_put(idev);
1504kfree_skb:
1505	consume_skb(skb);
1506}
1507
1508static void mld_query_work(struct work_struct *work)
1509{
1510	struct inet6_dev *idev = container_of(to_delayed_work(work),
1511					      struct inet6_dev,
1512					      mc_query_work);
1513	struct sk_buff_head q;
1514	struct sk_buff *skb;
1515	bool rework = false;
1516	int cnt = 0;
1517
1518	skb_queue_head_init(&q);
1519
1520	spin_lock_bh(&idev->mc_query_lock);
1521	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1522		__skb_queue_tail(&q, skb);
1523
1524		if (++cnt >= MLD_MAX_QUEUE) {
1525			rework = true;
1526			break;
1527		}
1528	}
1529	spin_unlock_bh(&idev->mc_query_lock);
1530
1531	mutex_lock(&idev->mc_lock);
1532	while ((skb = __skb_dequeue(&q)))
1533		__mld_query_work(skb);
1534	mutex_unlock(&idev->mc_lock);
1535
1536	if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
1537		return;
1538
1539	in6_dev_put(idev);
1540}
1541
1542/* called with rcu_read_lock() */
1543void igmp6_event_report(struct sk_buff *skb)
1544{
1545	struct inet6_dev *idev = __in6_dev_get(skb->dev);
1546
1547	if (!idev || idev->dead)
1548		goto out;
1549
1550	spin_lock_bh(&idev->mc_report_lock);
1551	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1552		__skb_queue_tail(&idev->mc_report_queue, skb);
1553		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1554			in6_dev_hold(idev);
1555		skb = NULL;
1556	}
1557	spin_unlock_bh(&idev->mc_report_lock);
1558out:
1559	kfree_skb(skb);
1560}
1561
1562static void __mld_report_work(struct sk_buff *skb)
1563{
1564	struct inet6_dev *idev;
1565	struct ifmcaddr6 *ma;
1566	struct mld_msg *mld;
1567	int addr_type;
1568
1569	/* Our own report looped back. Ignore it. */
1570	if (skb->pkt_type == PACKET_LOOPBACK)
1571		goto kfree_skb;
1572
1573	/* send our report if the MC router may not have heard this report */
1574	if (skb->pkt_type != PACKET_MULTICAST &&
1575	    skb->pkt_type != PACKET_BROADCAST)
1576		goto kfree_skb;
1577
1578	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1579		goto kfree_skb;
1580
1581	mld = (struct mld_msg *)icmp6_hdr(skb);
1582
 1583	/* Drop reports whose source is not a link-local address */
1584	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1585	if (addr_type != IPV6_ADDR_ANY &&
1586	    !(addr_type&IPV6_ADDR_LINKLOCAL))
1587		goto kfree_skb;
1588
1589	idev = in6_dev_get(skb->dev);
1590	if (!idev)
1591		goto kfree_skb;
1592
1593	/*
1594	 *	Cancel the work for this group
1595	 */
1596
1597	for_each_mc_mclock(idev, ma) {
1598		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1599			if (cancel_delayed_work(&ma->mca_work))
1600				refcount_dec(&ma->mca_refcnt);
1601			ma->mca_flags &= ~(MAF_LAST_REPORTER |
1602					   MAF_TIMER_RUNNING);
1603			break;
1604		}
1605	}
1606
1607	in6_dev_put(idev);
1608kfree_skb:
1609	consume_skb(skb);
1610}
1611
1612static void mld_report_work(struct work_struct *work)
1613{
1614	struct inet6_dev *idev = container_of(to_delayed_work(work),
1615					      struct inet6_dev,
1616					      mc_report_work);
1617	struct sk_buff_head q;
1618	struct sk_buff *skb;
1619	bool rework = false;
1620	int cnt = 0;
1621
1622	skb_queue_head_init(&q);
1623	spin_lock_bh(&idev->mc_report_lock);
1624	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1625		__skb_queue_tail(&q, skb);
1626
1627		if (++cnt >= MLD_MAX_QUEUE) {
1628			rework = true;
1629			break;
1630		}
1631	}
1632	spin_unlock_bh(&idev->mc_report_lock);
1633
1634	mutex_lock(&idev->mc_lock);
1635	while ((skb = __skb_dequeue(&q)))
1636		__mld_report_work(skb);
1637	mutex_unlock(&idev->mc_lock);
1638
1639	if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
1640		return;
1641
1642	in6_dev_put(idev);
1643}
1644
1645static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1646		  int gdeleted, int sdeleted)
1647{
1648	switch (type) {
1649	case MLD2_MODE_IS_INCLUDE:
1650	case MLD2_MODE_IS_EXCLUDE:
1651		if (gdeleted || sdeleted)
1652			return false;
1653		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1654			if (pmc->mca_sfmode == MCAST_INCLUDE)
1655				return true;
1656			/* don't include if this source is excluded
1657			 * in all filters
1658			 */
1659			if (psf->sf_count[MCAST_INCLUDE])
1660				return type == MLD2_MODE_IS_INCLUDE;
1661			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1662				psf->sf_count[MCAST_EXCLUDE];
1663		}
1664		return false;
1665	case MLD2_CHANGE_TO_INCLUDE:
1666		if (gdeleted || sdeleted)
1667			return false;
1668		return psf->sf_count[MCAST_INCLUDE] != 0;
1669	case MLD2_CHANGE_TO_EXCLUDE:
1670		if (gdeleted || sdeleted)
1671			return false;
1672		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1673		    psf->sf_count[MCAST_INCLUDE])
1674			return false;
1675		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1676			psf->sf_count[MCAST_EXCLUDE];
1677	case MLD2_ALLOW_NEW_SOURCES:
1678		if (gdeleted || !psf->sf_crcount)
1679			return false;
1680		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1681	case MLD2_BLOCK_OLD_SOURCES:
1682		if (pmc->mca_sfmode == MCAST_INCLUDE)
1683			return gdeleted || (psf->sf_crcount && sdeleted);
1684		return psf->sf_crcount && !gdeleted && !sdeleted;
1685	}
1686	return false;
1687}
1688
1689static int
1690mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1691{
1692	struct ip6_sf_list *psf;
1693	int scount = 0;
1694
1695	for_each_psf_mclock(pmc, psf) {
1696		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1697			continue;
1698		scount++;
1699	}
1700	return scount;
1701}
1702
1703static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
1704		       struct net_device *dev, const struct in6_addr *saddr,
1705		       const struct in6_addr *daddr, int proto, int len)
1706{
1707	struct ipv6hdr *hdr;
1708
1709	skb->protocol = htons(ETH_P_IPV6);
1710	skb->dev = dev;
1711
1712	skb_reset_network_header(skb);
1713	skb_put(skb, sizeof(struct ipv6hdr));
1714	hdr = ipv6_hdr(skb);
1715
1716	ip6_flow_hdr(hdr, 0, 0);
1717
1718	hdr->payload_len = htons(len);
1719	hdr->nexthdr = proto;
1720	hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);
1721
1722	hdr->saddr = *saddr;
1723	hdr->daddr = *daddr;
1724}
1725
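/* Allocate and prime a fresh MLDv2 report skb: IPv6 header addressed
 * to the all-MLDv2-capable-routers group, Hop-by-Hop Router Alert
 * option and an (initially empty) MLD2 report header.
 */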
1726static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1727{
1728	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
1729		     2, 0, 0, IPV6_TLV_PADN, 0 };
1730	struct net_device *dev = idev->dev;
1731	int hlen = LL_RESERVED_SPACE(dev);
1732	int tlen = dev->needed_tailroom;
1733	const struct in6_addr *saddr;
1734	struct in6_addr addr_buf;
1735	struct mld2_report *pmr;
1736	struct sk_buff *skb;
1737	unsigned int size;
1738	struct sock *sk;
1739	struct net *net;
1740
1741	/* we assume size > sizeof(ra) here
1742	 * Also try to not allocate high-order pages for big MTU
1743	 */
1744	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
1745	skb = alloc_skb(size, GFP_KERNEL);
1746	if (!skb)
1747		return NULL;
1748
1749	skb->priority = TC_PRIO_CONTROL;
1750	skb_reserve(skb, hlen);
1751	skb_tailroom_reserve(skb, mtu, tlen);
1752
1753	rcu_read_lock();
1754
1755	net = dev_net_rcu(dev);
1756	sk = net->ipv6.igmp_sk;
1757	skb_set_owner_w(skb, sk);
1758
1759	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1760		/* <draft-ietf-magma-mld-source-05.txt>:
1761		 * use unspecified address as the source address
1762		 * when a valid link-local address is not available.
1763		 */
1764		saddr = &in6addr_any;
1765	} else
1766		saddr = &addr_buf;
1767
1768	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1769
1770	rcu_read_unlock();
1771
1772	skb_put_data(skb, ra, sizeof(ra));
1773
1774	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1775	skb_put(skb, sizeof(*pmr));
1776	pmr = (struct mld2_report *)skb_transport_header(skb);
1777	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1778	pmr->mld2r_resv1 = 0;
1779	pmr->mld2r_cksum = 0;
1780	pmr->mld2r_resv2 = 0;
1781	pmr->mld2r_ngrec = 0;
1782	return skb;
1783}
1784
1785static void mld_sendpack(struct sk_buff *skb)
1786{
1787	struct ipv6hdr *pip6 = ipv6_hdr(skb);
1788	struct mld2_report *pmr =
1789			      (struct mld2_report *)skb_transport_header(skb);
1790	int payload_len, mldlen;
1791	struct inet6_dev *idev;
1792	struct net *net = dev_net(skb->dev);
1793	int err;
1794	struct flowi6 fl6;
1795	struct dst_entry *dst;
1796
1797	rcu_read_lock();
1798	idev = __in6_dev_get(skb->dev);
1799	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
1800
1801	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1802		sizeof(*pip6);
1803	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1804	pip6->payload_len = htons(payload_len);
1805
1806	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1807					   IPPROTO_ICMPV6,
1808					   csum_partial(skb_transport_header(skb),
1809							mldlen, 0));
1810
1811	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1812			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1813			 skb->dev->ifindex);
1814	dst = icmp6_dst_alloc(skb->dev, &fl6);
1815
1816	err = 0;
1817	if (IS_ERR(dst)) {
1818		err = PTR_ERR(dst);
1819		dst = NULL;
1820	}
1821	skb_dst_set(skb, dst);
1822	if (err)
1823		goto err_out;
1824
1825	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1826		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1827		      dst_output);
1828out:
1829	if (!err) {
1830		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1831		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1832	} else {
1833		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1834	}
1835
1836	rcu_read_unlock();
1837	return;
1838
1839err_out:
1840	kfree_skb(skb);
1841	goto out;
1842}
1843
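/* Size of one group record for @pmc: the fixed record header plus
 * 16 bytes (one IPv6 address) per source reported for this record type.
 */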
1844static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1845{
1846	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1847}
1848
1849static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1850	int type, struct mld2_grec **ppgr, unsigned int mtu)
1851{
1852	struct mld2_report *pmr;
1853	struct mld2_grec *pgr;
1854
1855	if (!skb) {
1856		skb = mld_newpack(pmc->idev, mtu);
1857		if (!skb)
1858			return NULL;
1859	}
1860	pgr = skb_put(skb, sizeof(struct mld2_grec));
1861	pgr->grec_type = type;
1862	pgr->grec_auxwords = 0;
1863	pgr->grec_nsrcs = 0;
1864	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
1865	pmr = (struct mld2_report *)skb_transport_header(skb);
1866	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1867	*ppgr = pgr;
1868	return skb;
1869}
1870
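/* Tailroom still usable for group records/sources in the pending report */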
1871#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
1872
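/* Append a group record of @type for @pmc to @skb, starting a new report
 * packet when needed.  @gdeleted and @sdeleted select the tomb (deleted)
 * group and source lists; @crsend forces a record to be emitted even for
 * sources that is_in() would normally filter out.
 */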
1873/* called with mc_lock */
1874static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1875				int type, int gdeleted, int sdeleted,
1876				int crsend)
1877{
1878	struct ip6_sf_list *psf, *psf_prev, *psf_next;
1879	int scount, stotal, first, isquery, truncate;
1880	struct ip6_sf_list __rcu **psf_list;
1881	struct inet6_dev *idev = pmc->idev;
1882	struct net_device *dev = idev->dev;
1883	struct mld2_grec *pgr = NULL;
1884	struct mld2_report *pmr;
1885	unsigned int mtu;
1886
1887	if (pmc->mca_flags & MAF_NOREPORT)
1888		return skb;
1889
1890	mtu = READ_ONCE(dev->mtu);
1891	if (mtu < IPV6_MIN_MTU)
1892		return skb;
1893
1894	isquery = type == MLD2_MODE_IS_INCLUDE ||
1895		  type == MLD2_MODE_IS_EXCLUDE;
1896	truncate = type == MLD2_MODE_IS_EXCLUDE ||
1897		    type == MLD2_CHANGE_TO_EXCLUDE;
1898
1899	stotal = scount = 0;
1900
1901	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1902
1903	if (!rcu_access_pointer(*psf_list))
1904		goto empty_source;
1905
1906	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1907
1908	/* EX and TO_EX get a fresh packet, if needed */
1909	if (truncate) {
1910		if (pmr && pmr->mld2r_ngrec &&
1911		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1912			if (skb)
1913				mld_sendpack(skb);
1914			skb = mld_newpack(idev, mtu);
1915		}
1916	}
1917	first = 1;
1918	psf_prev = NULL;
1919	for (psf = mc_dereference(*psf_list, idev);
1920	     psf;
1921	     psf = psf_next) {
1922		struct in6_addr *psrc;
1923
1924		psf_next = mc_dereference(psf->sf_next, idev);
1925
1926		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1927			psf_prev = psf;
1928			continue;
1929		}
1930
1931		/* Based on RFC3810 6.1. Should not send source-list change
1932		 * records when there is a filter mode change.
1933		 */
1934		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1935		     (!gdeleted && pmc->mca_crcount)) &&
1936		    (type == MLD2_ALLOW_NEW_SOURCES ||
1937		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1938			goto decrease_sf_crcount;
1939
1940		/* clear marks on query responses */
1941		if (isquery)
1942			psf->sf_gsresp = 0;
1943
1944		if (AVAILABLE(skb) < sizeof(*psrc) +
1945		    first*sizeof(struct mld2_grec)) {
1946			if (truncate && !first)
1947				break;	 /* truncate these */
1948			if (pgr)
1949				pgr->grec_nsrcs = htons(scount);
1950			if (skb)
1951				mld_sendpack(skb);
1952			skb = mld_newpack(idev, mtu);
1953			first = 1;
1954			scount = 0;
1955		}
1956		if (first) {
1957			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1958			first = 0;
1959		}
1960		if (!skb)
1961			return NULL;
1962		psrc = skb_put(skb, sizeof(*psrc));
1963		*psrc = psf->sf_addr;
1964		scount++; stotal++;
1965		if ((type == MLD2_ALLOW_NEW_SOURCES ||
1966		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1967decrease_sf_crcount:
1968			psf->sf_crcount--;
1969			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1970				if (psf_prev)
1971					rcu_assign_pointer(psf_prev->sf_next,
1972							   mc_dereference(psf->sf_next, idev));
1973				else
1974					rcu_assign_pointer(*psf_list,
1975							   mc_dereference(psf->sf_next, idev));
1976				kfree_rcu(psf, rcu);
1977				continue;
1978			}
1979		}
1980		psf_prev = psf;
1981	}
1982
1983empty_source:
1984	if (!stotal) {
1985		if (type == MLD2_ALLOW_NEW_SOURCES ||
1986		    type == MLD2_BLOCK_OLD_SOURCES)
1987			return skb;
1988		if (pmc->mca_crcount || isquery || crsend) {
1989			/* make sure we have room for group header */
1990			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1991				mld_sendpack(skb);
1992				skb = NULL; /* add_grhead will get a new one */
1993			}
1994			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1995		}
1996	}
1997	if (pgr)
1998		pgr->grec_nsrcs = htons(scount);
1999
2000	if (isquery)
2001		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
2002	return skb;
2003}
2004
2005/* called with mc_lock */
2006static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2007{
2008	struct sk_buff *skb = NULL;
2009	int type;
2010
2011	if (!pmc) {
2012		for_each_mc_mclock(idev, pmc) {
2013			if (pmc->mca_flags & MAF_NOREPORT)
2014				continue;
2015			if (pmc->mca_sfcount[MCAST_EXCLUDE])
2016				type = MLD2_MODE_IS_EXCLUDE;
2017			else
2018				type = MLD2_MODE_IS_INCLUDE;
2019			skb = add_grec(skb, pmc, type, 0, 0, 0);
2020		}
2021	} else {
2022		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2023			type = MLD2_MODE_IS_EXCLUDE;
2024		else
2025			type = MLD2_MODE_IS_INCLUDE;
2026		skb = add_grec(skb, pmc, type, 0, 0, 0);
2027	}
2028	if (skb)
2029		mld_sendpack(skb);
2030}
2031
2032/*
2033 * remove zero-count source records from a source filter list
2034 * called with mc_lock
2035 */
2036static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2037{
2038	struct ip6_sf_list *psf_prev, *psf_next, *psf;
2039
2040	psf_prev = NULL;
2041	for (psf = mc_dereference(*ppsf, idev);
2042	     psf;
2043	     psf = psf_next) {
2044		psf_next = mc_dereference(psf->sf_next, idev);
2045		if (psf->sf_crcount == 0) {
2046			if (psf_prev)
2047				rcu_assign_pointer(psf_prev->sf_next,
2048						   mc_dereference(psf->sf_next, idev));
2049			else
2050				rcu_assign_pointer(*ppsf,
2051						   mc_dereference(psf->sf_next, idev));
2052			kfree_rcu(psf, rcu);
2053		} else {
2054			psf_prev = psf;
2055		}
2056	}
2057}
2058
2059/* called with mc_lock */
2060static void mld_send_cr(struct inet6_dev *idev)
2061{
2062	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
2063	struct sk_buff *skb = NULL;
2064	int type, dtype;
2065
2066	/* deleted MCA's */
2067	pmc_prev = NULL;
2068	for (pmc = mc_dereference(idev->mc_tomb, idev);
2069	     pmc;
2070	     pmc = pmc_next) {
2071		pmc_next = mc_dereference(pmc->next, idev);
2072		if (pmc->mca_sfmode == MCAST_INCLUDE) {
2073			type = MLD2_BLOCK_OLD_SOURCES;
2074			dtype = MLD2_BLOCK_OLD_SOURCES;
2075			skb = add_grec(skb, pmc, type, 1, 0, 0);
2076			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
2077		}
2078		if (pmc->mca_crcount) {
2079			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
2080				type = MLD2_CHANGE_TO_INCLUDE;
2081				skb = add_grec(skb, pmc, type, 1, 0, 0);
2082			}
2083			pmc->mca_crcount--;
2084			if (pmc->mca_crcount == 0) {
2085				mld_clear_zeros(&pmc->mca_tomb, idev);
2086				mld_clear_zeros(&pmc->mca_sources, idev);
2087			}
2088		}
2089		if (pmc->mca_crcount == 0 &&
2090		    !rcu_access_pointer(pmc->mca_tomb) &&
2091		    !rcu_access_pointer(pmc->mca_sources)) {
2092			if (pmc_prev)
2093				rcu_assign_pointer(pmc_prev->next, pmc_next);
2094			else
2095				rcu_assign_pointer(idev->mc_tomb, pmc_next);
2096			in6_dev_put(pmc->idev);
2097			kfree_rcu(pmc, rcu);
2098		} else
2099			pmc_prev = pmc;
2100	}
2101
2102	/* change recs */
2103	for_each_mc_mclock(idev, pmc) {
2104		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2105			type = MLD2_BLOCK_OLD_SOURCES;
2106			dtype = MLD2_ALLOW_NEW_SOURCES;
2107		} else {
2108			type = MLD2_ALLOW_NEW_SOURCES;
2109			dtype = MLD2_BLOCK_OLD_SOURCES;
2110		}
2111		skb = add_grec(skb, pmc, type, 0, 0, 0);
2112		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
2113
2114		/* filter mode changes */
2115		if (pmc->mca_crcount) {
2116			if (pmc->mca_sfmode == MCAST_EXCLUDE)
2117				type = MLD2_CHANGE_TO_EXCLUDE;
2118			else
2119				type = MLD2_CHANGE_TO_INCLUDE;
2120			skb = add_grec(skb, pmc, type, 0, 0, 0);
2121			pmc->mca_crcount--;
2122		}
2123	}
2124	if (!skb)
2125		return;
2126	(void) mld_sendpack(skb);
2127}
2128
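/* Build and send a single MLDv1 message (Report or Done) for @addr.
 * Done (ICMPV6_MGM_REDUCTION) messages go to the link-local all-routers
 * group, Reports go to the group address itself.
 */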
2129static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
2130{
2131	const struct in6_addr *snd_addr, *saddr;
2132	int err, len, payload_len, full_len;
2133	struct in6_addr addr_buf;
2134	struct inet6_dev *idev;
2135	struct sk_buff *skb;
2136	struct mld_msg *hdr;
2137	int hlen = LL_RESERVED_SPACE(dev);
2138	int tlen = dev->needed_tailroom;
2139	u8 ra[8] = { IPPROTO_ICMPV6, 0,
2140		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
2141		     IPV6_TLV_PADN, 0 };
2142	struct dst_entry *dst;
2143	struct flowi6 fl6;
2144	struct net *net;
2145	struct sock *sk;
2146
2147	if (type == ICMPV6_MGM_REDUCTION)
2148		snd_addr = &in6addr_linklocal_allrouters;
2149	else
2150		snd_addr = addr;
2151
2152	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2153	payload_len = len + sizeof(ra);
2154	full_len = sizeof(struct ipv6hdr) + payload_len;
2155
2156	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
2157
2158	rcu_read_lock();
2159
2160	net = dev_net_rcu(dev);
2161	idev = __in6_dev_get(dev);
2162	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
2163	if (!skb) {
2164		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2165		rcu_read_unlock();
2166		return;
2167	}
2168	sk = net->ipv6.igmp_sk;
2169	skb_set_owner_w(skb, sk);
2170
2171	skb->priority = TC_PRIO_CONTROL;
2172	skb_reserve(skb, hlen);
2173
2174	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2175		/* <draft-ietf-magma-mld-source-05.txt>:
2176		 * use unspecified address as the source address
2177		 * when a valid link-local address is not available.
2178		 */
2179		saddr = &in6addr_any;
2180	} else
2181		saddr = &addr_buf;
2182
2183	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2184
2185	skb_put_data(skb, ra, sizeof(ra));
2186
2187	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2188	hdr->mld_type = type;
2189	hdr->mld_mca = *addr;
2190
2191	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2192					 IPPROTO_ICMPV6,
2193					 csum_partial(hdr, len, 0));
2194
2195	icmpv6_flow_init(sk, &fl6, type,
2196			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2197			 skb->dev->ifindex);
2198	dst = icmp6_dst_alloc(skb->dev, &fl6);
2199	if (IS_ERR(dst)) {
2200		err = PTR_ERR(dst);
2201		goto err_out;
2202	}
2203
2204	skb_dst_set(skb, dst);
2205	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2206		      net, sk, skb, NULL, skb->dev,
2207		      dst_output);
2208out:
2209	if (!err) {
2210		ICMP6MSGOUT_INC_STATS(net, idev, type);
2211		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2212	} else
2213		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2214
2215	rcu_read_unlock();
2216	return;
2217
2218err_out:
2219	kfree_skb(skb);
2220	goto out;
2221}
2222
2223/* called with mc_lock */
2224static void mld_send_initial_cr(struct inet6_dev *idev)
2225{
2226	struct sk_buff *skb;
2227	struct ifmcaddr6 *pmc;
2228	int type;
2229
2230	if (mld_in_v1_mode(idev))
2231		return;
2232
2233	skb = NULL;
2234	for_each_mc_mclock(idev, pmc) {
2235		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2236			type = MLD2_CHANGE_TO_EXCLUDE;
2237		else
2238			type = MLD2_ALLOW_NEW_SOURCES;
2239		skb = add_grec(skb, pmc, type, 0, 0, 1);
2240	}
2241	if (skb)
2242		mld_sendpack(skb);
2243}
2244
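/* Called once duplicate address detection has completed: (re-)announce
 * our listener state with up to mc_qrv unsolicited MLDv2 reports.
 */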
2245void ipv6_mc_dad_complete(struct inet6_dev *idev)
2246{
2247	mutex_lock(&idev->mc_lock);
2248	idev->mc_dad_count = idev->mc_qrv;
2249	if (idev->mc_dad_count) {
2250		mld_send_initial_cr(idev);
2251		idev->mc_dad_count--;
2252		if (idev->mc_dad_count)
2253			mld_dad_start_work(idev,
2254					   unsolicited_report_interval(idev));
2255	}
2256	mutex_unlock(&idev->mc_lock);
2257}
2258
2259static void mld_dad_work(struct work_struct *work)
2260{
2261	struct inet6_dev *idev = container_of(to_delayed_work(work),
2262					      struct inet6_dev,
2263					      mc_dad_work);
2264	mutex_lock(&idev->mc_lock);
2265	mld_send_initial_cr(idev);
2266	if (idev->mc_dad_count) {
2267		idev->mc_dad_count--;
2268		if (idev->mc_dad_count)
2269			mld_dad_start_work(idev,
2270					   unsolicited_report_interval(idev));
2271	}
2272	mutex_unlock(&idev->mc_lock);
2273	in6_dev_put(idev);
2274}
2275
2276/* called with mc_lock */
2277static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2278	const struct in6_addr *psfsrc)
2279{
2280	struct ip6_sf_list *psf, *psf_prev;
2281	int rv = 0;
2282
2283	psf_prev = NULL;
2284	for_each_psf_mclock(pmc, psf) {
2285		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2286			break;
2287		psf_prev = psf;
2288	}
2289	if (!psf || psf->sf_count[sfmode] == 0) {
2290		/* source filter not found, or count wrong =>  bug */
2291		return -ESRCH;
2292	}
2293	psf->sf_count[sfmode]--;
2294	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2295		struct inet6_dev *idev = pmc->idev;
2296
2297		/* no more filters for this source */
2298		if (psf_prev)
2299			rcu_assign_pointer(psf_prev->sf_next,
2300					   mc_dereference(psf->sf_next, idev));
2301		else
2302			rcu_assign_pointer(pmc->mca_sources,
2303					   mc_dereference(psf->sf_next, idev));
2304
2305		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2306		    !mld_in_v1_mode(idev)) {
2307			psf->sf_crcount = idev->mc_qrv;
2308			rcu_assign_pointer(psf->sf_next,
2309					   mc_dereference(pmc->mca_tomb, idev));
2310			rcu_assign_pointer(pmc->mca_tomb, psf);
2311			rv = 1;
2312		} else {
2313			kfree_rcu(psf, rcu);
2314		}
2315	}
2316	return rv;
2317}
2318
2319/* called with mc_lock */
2320static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2321			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2322			  int delta)
2323{
2324	struct ifmcaddr6 *pmc;
2325	int	changerec = 0;
2326	int	i, err;
2327
2328	if (!idev)
2329		return -ENODEV;
2330
2331	for_each_mc_mclock(idev, pmc) {
2332		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2333			break;
2334	}
2335	if (!pmc)
2336		return -ESRCH;
2337
2338	sf_markstate(pmc);
2339	if (!delta) {
2340		if (!pmc->mca_sfcount[sfmode])
2341			return -EINVAL;
2342
2343		pmc->mca_sfcount[sfmode]--;
2344	}
2345	err = 0;
2346	for (i = 0; i < sfcount; i++) {
2347		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2348
2349		changerec |= rv > 0;
2350		if (!err && rv < 0)
2351			err = rv;
2352	}
2353	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2354	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2355	    pmc->mca_sfcount[MCAST_INCLUDE]) {
2356		struct ip6_sf_list *psf;
2357
2358		/* filter mode change */
2359		pmc->mca_sfmode = MCAST_INCLUDE;
2360		pmc->mca_crcount = idev->mc_qrv;
2361		idev->mc_ifc_count = pmc->mca_crcount;
2362		for_each_psf_mclock(pmc, psf)
2363			psf->sf_crcount = 0;
2364		mld_ifc_event(pmc->idev);
2365	} else if (sf_setstate(pmc) || changerec) {
2366		mld_ifc_event(pmc->idev);
2367	}
2368
2369	return err;
2370}
2371
2372/*
2373 * Add multicast single-source filter to the interface list
2374 * called with mc_lock
2375 */
2376static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2377	const struct in6_addr *psfsrc)
2378{
2379	struct ip6_sf_list *psf, *psf_prev;
2380
2381	psf_prev = NULL;
2382	for_each_psf_mclock(pmc, psf) {
2383		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2384			break;
2385		psf_prev = psf;
2386	}
2387	if (!psf) {
2388		psf = kzalloc(sizeof(*psf), GFP_KERNEL);
2389		if (!psf)
2390			return -ENOBUFS;
2391
2392		psf->sf_addr = *psfsrc;
2393		if (psf_prev) {
2394			rcu_assign_pointer(psf_prev->sf_next, psf);
2395		} else {
2396			rcu_assign_pointer(pmc->mca_sources, psf);
2397		}
2398	}
2399	psf->sf_count[sfmode]++;
2400	return 0;
2401}
2402
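/* Record, per source, whether the current filter lets it through
 * (sf_oldin) so that sf_setstate() can later detect which sources
 * changed state.
 */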
2403/* called with mc_lock */
2404static void sf_markstate(struct ifmcaddr6 *pmc)
2405{
2406	struct ip6_sf_list *psf;
2407	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2408
2409	for_each_psf_mclock(pmc, psf) {
2410		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2411			psf->sf_oldin = mca_xcount ==
2412				psf->sf_count[MCAST_EXCLUDE] &&
2413				!psf->sf_count[MCAST_INCLUDE];
2414		} else {
2415			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2416		}
2417	}
2418}
2419
2420/* called with mc_lock */
2421static int sf_setstate(struct ifmcaddr6 *pmc)
2422{
2423	struct ip6_sf_list *psf, *dpsf;
2424	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2425	int qrv = pmc->idev->mc_qrv;
2426	int new_in, rv;
2427
2428	rv = 0;
2429	for_each_psf_mclock(pmc, psf) {
2430		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2431			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2432				!psf->sf_count[MCAST_INCLUDE];
2433		} else
2434			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2435		if (new_in) {
2436			if (!psf->sf_oldin) {
2437				struct ip6_sf_list *prev = NULL;
2438
2439				for_each_psf_tomb(pmc, dpsf) {
2440					if (ipv6_addr_equal(&dpsf->sf_addr,
2441					    &psf->sf_addr))
2442						break;
2443					prev = dpsf;
2444				}
2445				if (dpsf) {
2446					if (prev)
2447						rcu_assign_pointer(prev->sf_next,
2448								   mc_dereference(dpsf->sf_next,
2449										  pmc->idev));
2450					else
2451						rcu_assign_pointer(pmc->mca_tomb,
2452								   mc_dereference(dpsf->sf_next,
2453										  pmc->idev));
2454					kfree_rcu(dpsf, rcu);
2455				}
2456				psf->sf_crcount = qrv;
2457				rv++;
2458			}
2459		} else if (psf->sf_oldin) {
2460			psf->sf_crcount = 0;
2461			/*
2462			 * add or update "delete" records if an active filter
2463			 * is now inactive
2464			 */
2465
2466			for_each_psf_tomb(pmc, dpsf)
2467				if (ipv6_addr_equal(&dpsf->sf_addr,
2468				    &psf->sf_addr))
2469					break;
2470			if (!dpsf) {
2471				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
2472				if (!dpsf)
2473					continue;
2474				*dpsf = *psf;
2475				rcu_assign_pointer(dpsf->sf_next,
2476						   mc_dereference(pmc->mca_tomb, pmc->idev));
2477				rcu_assign_pointer(pmc->mca_tomb, dpsf);
2478			}
2479			dpsf->sf_crcount = qrv;
2480			rv++;
2481		}
2482	}
2483	return rv;
2484}
2485
2486/*
2487 * Add multicast source filter list to the interface list
2488 * called with mc_lock
2489 */
2490static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2491			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2492			  int delta)
2493{
2494	struct ifmcaddr6 *pmc;
2495	int	isexclude;
2496	int	i, err;
2497
2498	if (!idev)
2499		return -ENODEV;
2500
2501	for_each_mc_mclock(idev, pmc) {
2502		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2503			break;
2504	}
2505	if (!pmc)
2506		return -ESRCH;
2507
2508	sf_markstate(pmc);
2509	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2510	if (!delta)
2511		pmc->mca_sfcount[sfmode]++;
2512	err = 0;
2513	for (i = 0; i < sfcount; i++) {
2514		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2515		if (err)
2516			break;
2517	}
2518	if (err) {
2519		int j;
2520
2521		if (!delta)
2522			pmc->mca_sfcount[sfmode]--;
2523		for (j = 0; j < i; j++)
2524			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2525	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2526		struct ip6_sf_list *psf;
2527
2528		/* filter mode change */
2529		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2530			pmc->mca_sfmode = MCAST_EXCLUDE;
2531		else if (pmc->mca_sfcount[MCAST_INCLUDE])
2532			pmc->mca_sfmode = MCAST_INCLUDE;
2533		/* else no filters; keep old mode for reports */
2534
2535		pmc->mca_crcount = idev->mc_qrv;
2536		idev->mc_ifc_count = pmc->mca_crcount;
2537		for_each_psf_mclock(pmc, psf)
2538			psf->sf_crcount = 0;
2539		mld_ifc_event(idev);
2540	} else if (sf_setstate(pmc)) {
2541		mld_ifc_event(idev);
2542	}
2543	return err;
2544}
2545
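/* Free every source filter of @pmc and reset the group to its default
 * EXCLUDE({}) state, i.e. accept traffic from any source.
 */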
2546/* called with mc_lock */
2547static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2548{
2549	struct ip6_sf_list *psf, *nextpsf;
2550
2551	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2552	     psf;
2553	     psf = nextpsf) {
2554		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2555		kfree_rcu(psf, rcu);
2556	}
2557	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
2558	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2559	     psf;
2560	     psf = nextpsf) {
2561		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2562		kfree_rcu(psf, rcu);
2563	}
2564	RCU_INIT_POINTER(pmc->mca_sources, NULL);
2565	pmc->mca_sfmode = MCAST_EXCLUDE;
2566	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2567	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2568}
2569
2570/* called with mc_lock */
2571static void igmp6_join_group(struct ifmcaddr6 *ma)
2572{
2573	unsigned long delay;
2574
2575	if (ma->mca_flags & MAF_NOREPORT)
2576		return;
2577
2578	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2579
2580	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
2581
2582	if (cancel_delayed_work(&ma->mca_work)) {
2583		refcount_dec(&ma->mca_refcnt);
2584		delay = ma->mca_work.timer.expires - jiffies;
2585	}
2586
2587	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
2588		refcount_inc(&ma->mca_refcnt);
2589	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2590}
2591
2592static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2593			    struct inet6_dev *idev)
2594{
2595	struct ip6_sf_socklist *psl;
2596	int err;
2597
2598	psl = sock_dereference(iml->sflist, sk);
2599
2600	if (idev)
2601		mutex_lock(&idev->mc_lock);
2602
2603	if (!psl) {
2604		/* any-source empty exclude case */
2605		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2606	} else {
2607		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2608				     psl->sl_count, psl->sl_addr, 0);
2609		RCU_INIT_POINTER(iml->sflist, NULL);
2610		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
2611			   &sk->sk_omem_alloc);
2612		kfree_rcu(psl, rcu);
2613	}
2614
2615	if (idev)
2616		mutex_unlock(&idev->mc_lock);
2617
2618	return err;
2619}
2620
2621/* called with mc_lock */
2622static void igmp6_leave_group(struct ifmcaddr6 *ma)
2623{
2624	if (mld_in_v1_mode(ma->idev)) {
2625		if (ma->mca_flags & MAF_LAST_REPORTER) {
2626			igmp6_send(&ma->mca_addr, ma->idev->dev,
2627				ICMPV6_MGM_REDUCTION);
2628		}
2629	} else {
2630		mld_add_delrec(ma->idev, ma);
2631		mld_ifc_event(ma->idev);
2632	}
2633}
2634
2635static void mld_gq_work(struct work_struct *work)
2636{
2637	struct inet6_dev *idev = container_of(to_delayed_work(work),
2638					      struct inet6_dev,
2639					      mc_gq_work);
2640
2641	mutex_lock(&idev->mc_lock);
2642	mld_send_report(idev, NULL);
2643	idev->mc_gq_running = 0;
2644	mutex_unlock(&idev->mc_lock);
2645
2646	in6_dev_put(idev);
2647}
2648
2649static void mld_ifc_work(struct work_struct *work)
2650{
2651	struct inet6_dev *idev = container_of(to_delayed_work(work),
2652					      struct inet6_dev,
2653					      mc_ifc_work);
2654
2655	mutex_lock(&idev->mc_lock);
2656	mld_send_cr(idev);
2657
2658	if (idev->mc_ifc_count) {
2659		idev->mc_ifc_count--;
2660		if (idev->mc_ifc_count)
2661			mld_ifc_start_work(idev,
2662					   unsolicited_report_interval(idev));
2663	}
2664	mutex_unlock(&idev->mc_lock);
2665	in6_dev_put(idev);
2666}
2667
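/* Schedule transmission of pending MLDv2 filter/source change reports;
 * mld_ifc_work() keeps resending them until mc_ifc_count (seeded with
 * the robustness variable mc_qrv) drops to zero.
 */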
2668/* called with mc_lock */
2669static void mld_ifc_event(struct inet6_dev *idev)
2670{
2671	if (mld_in_v1_mode(idev))
2672		return;
2673
2674	idev->mc_ifc_count = idev->mc_qrv;
2675	mld_ifc_start_work(idev, 1);
2676}
2677
2678static void mld_mca_work(struct work_struct *work)
2679{
2680	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
2681					    struct ifmcaddr6, mca_work);
2682
2683	mutex_lock(&ma->idev->mc_lock);
2684	if (mld_in_v1_mode(ma->idev))
2685		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2686	else
2687		mld_send_report(ma->idev, ma);
2688	ma->mca_flags |=  MAF_LAST_REPORTER;
2689	ma->mca_flags &= ~MAF_TIMER_RUNNING;
2690	mutex_unlock(&ma->idev->mc_lock);
2691
2692	ma_put(ma);
2693}
2694
2695/* Device changing type */
2696
2697void ipv6_mc_unmap(struct inet6_dev *idev)
2698{
2699	struct ifmcaddr6 *i;
2700
 2701	/* Withdraw the multicast list while the device changes type */
2702
2703	mutex_lock(&idev->mc_lock);
2704	for_each_mc_mclock(idev, i)
2705		igmp6_group_dropped(i);
2706	mutex_unlock(&idev->mc_lock);
2707}
2708
2709void ipv6_mc_remap(struct inet6_dev *idev)
2710{
2711	ipv6_mc_up(idev);
2712}
2713
2714/* Device going down */
2715void ipv6_mc_down(struct inet6_dev *idev)
2716{
2717	struct ifmcaddr6 *i;
2718
2719	mutex_lock(&idev->mc_lock);
2720	/* Withdraw multicast list */
2721	for_each_mc_mclock(idev, i)
2722		igmp6_group_dropped(i);
2723	mutex_unlock(&idev->mc_lock);
2724
 2725	/* Stop the works only after the groups have been dropped,
 2726	 * or mld_ifc_event() would start them again.
 2727	 */
2728	mld_query_stop_work(idev);
2729	mld_report_stop_work(idev);
2730
2731	mutex_lock(&idev->mc_lock);
2732	mld_ifc_stop_work(idev);
2733	mld_gq_stop_work(idev);
2734	mutex_unlock(&idev->mc_lock);
2735
2736	mld_dad_stop_work(idev);
2737}
2738
2739static void ipv6_mc_reset(struct inet6_dev *idev)
2740{
2741	idev->mc_qrv = sysctl_mld_qrv;
2742	idev->mc_qi = MLD_QI_DEFAULT;
2743	idev->mc_qri = MLD_QRI_DEFAULT;
2744	idev->mc_v1_seen = 0;
2745	idev->mc_maxdelay = unsolicited_report_interval(idev);
2746}
2747
2748/* Device going up */
2749
2750void ipv6_mc_up(struct inet6_dev *idev)
2751{
2752	struct ifmcaddr6 *i;
2753
2754	/* Install multicast list, except for all-nodes (already installed) */
2755
2756	ipv6_mc_reset(idev);
2757	mutex_lock(&idev->mc_lock);
2758	for_each_mc_mclock(idev, i) {
2759		mld_del_delrec(idev, i);
2760		igmp6_group_added(i);
2761	}
2762	mutex_unlock(&idev->mc_lock);
2763}
2764
2765/* IPv6 device initialization. */
2766
2767void ipv6_mc_init_dev(struct inet6_dev *idev)
2768{
2769	idev->mc_gq_running = 0;
2770	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2771	RCU_INIT_POINTER(idev->mc_tomb, NULL);
2772	idev->mc_ifc_count = 0;
2773	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2774	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2775	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2776	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2777	skb_queue_head_init(&idev->mc_query_queue);
2778	skb_queue_head_init(&idev->mc_report_queue);
2779	spin_lock_init(&idev->mc_query_lock);
2780	spin_lock_init(&idev->mc_report_lock);
2781	mutex_init(&idev->mc_lock);
2782	ipv6_mc_reset(idev);
2783}
2784
2785/*
2786 *	Device is about to be destroyed: clean up.
2787 */
2788
2789void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2790{
2791	struct ifmcaddr6 *i;
2792
2793	/* Deactivate works */
2794	ipv6_mc_down(idev);
2795	mutex_lock(&idev->mc_lock);
2796	mld_clear_delrec(idev);
2797	mutex_unlock(&idev->mc_lock);
2798	mld_clear_query(idev);
2799	mld_clear_report(idev);
2800
2801	/* Delete all-nodes address. */
2802	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
2803	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2804	 * fail.
2805	 */
2806	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2807
2808	if (idev->cnf.forwarding)
2809		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2810
2811	mutex_lock(&idev->mc_lock);
2812	while ((i = mc_dereference(idev->mc_list, idev))) {
2813		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2814
2815		ip6_mc_clear_src(i);
2816		ma_put(i);
2817	}
2818	mutex_unlock(&idev->mc_lock);
2819}
2820
2821static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2822{
2823	struct ifmcaddr6 *pmc;
2824
2825	ASSERT_RTNL();
2826
2827	mutex_lock(&idev->mc_lock);
2828	if (mld_in_v1_mode(idev)) {
2829		for_each_mc_mclock(idev, pmc)
2830			igmp6_join_group(pmc);
2831	} else {
2832		mld_send_report(idev, NULL);
2833	}
2834	mutex_unlock(&idev->mc_lock);
2835}
2836
2837static int ipv6_mc_netdev_event(struct notifier_block *this,
2838				unsigned long event,
2839				void *ptr)
2840{
2841	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2842	struct inet6_dev *idev = __in6_dev_get(dev);
2843
2844	switch (event) {
2845	case NETDEV_RESEND_IGMP:
2846		if (idev)
2847			ipv6_mc_rejoin_groups(idev);
2848		break;
2849	default:
2850		break;
2851	}
2852
2853	return NOTIFY_DONE;
2854}
2855
2856static struct notifier_block igmp6_netdev_notifier = {
2857	.notifier_call = ipv6_mc_netdev_event,
2858};
2859
2860#ifdef CONFIG_PROC_FS
2861struct igmp6_mc_iter_state {
2862	struct seq_net_private p;
2863	struct net_device *dev;
2864	struct inet6_dev *idev;
2865};
2866
2867#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
2868
2869static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2870{
2871	struct ifmcaddr6 *im = NULL;
2872	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2873	struct net *net = seq_file_net(seq);
2874
2875	state->idev = NULL;
2876	for_each_netdev_rcu(net, state->dev) {
2877		struct inet6_dev *idev;
2878		idev = __in6_dev_get(state->dev);
2879		if (!idev)
2880			continue;
2881
2882		im = rcu_dereference(idev->mc_list);
2883		if (im) {
2884			state->idev = idev;
2885			break;
2886		}
2887	}
2888	return im;
2889}
2890
2891static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2892{
2893	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2894
2895	im = rcu_dereference(im->next);
2896	while (!im) {
2897		state->dev = next_net_device_rcu(state->dev);
2898		if (!state->dev) {
2899			state->idev = NULL;
2900			break;
2901		}
2902		state->idev = __in6_dev_get(state->dev);
2903		if (!state->idev)
2904			continue;
2905		im = rcu_dereference(state->idev->mc_list);
2906	}
2907	return im;
2908}
2909
2910static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2911{
2912	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2913	if (im)
2914		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2915			--pos;
2916	return pos ? NULL : im;
2917}
2918
2919static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2920	__acquires(RCU)
2921{
2922	rcu_read_lock();
2923	return igmp6_mc_get_idx(seq, *pos);
2924}
2925
2926static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2927{
2928	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2929
2930	++*pos;
2931	return im;
2932}
2933
2934static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2935	__releases(RCU)
2936{
2937	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2938
2939	if (likely(state->idev))
2940		state->idev = NULL;
2941	state->dev = NULL;
2942	rcu_read_unlock();
2943}
2944
2945static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2946{
2947	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2948	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2949
2950	seq_printf(seq,
2951		   "%-4d %-15s %pi6 %5d %08X %ld\n",
2952		   state->dev->ifindex, state->dev->name,
2953		   &im->mca_addr,
2954		   im->mca_users, im->mca_flags,
2955		   (im->mca_flags & MAF_TIMER_RUNNING) ?
2956		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
2957	return 0;
2958}
2959
2960static const struct seq_operations igmp6_mc_seq_ops = {
2961	.start	=	igmp6_mc_seq_start,
2962	.next	=	igmp6_mc_seq_next,
2963	.stop	=	igmp6_mc_seq_stop,
2964	.show	=	igmp6_mc_seq_show,
2965};
2966
2967struct igmp6_mcf_iter_state {
2968	struct seq_net_private p;
2969	struct net_device *dev;
2970	struct inet6_dev *idev;
2971	struct ifmcaddr6 *im;
2972};
2973
2974#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
2975
2976static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2977{
2978	struct ip6_sf_list *psf = NULL;
2979	struct ifmcaddr6 *im = NULL;
2980	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2981	struct net *net = seq_file_net(seq);
2982
2983	state->idev = NULL;
2984	state->im = NULL;
2985	for_each_netdev_rcu(net, state->dev) {
2986		struct inet6_dev *idev;
2987		idev = __in6_dev_get(state->dev);
2988		if (unlikely(idev == NULL))
2989			continue;
2990
2991		im = rcu_dereference(idev->mc_list);
2992		if (likely(im)) {
2993			psf = rcu_dereference(im->mca_sources);
2994			if (likely(psf)) {
2995				state->im = im;
2996				state->idev = idev;
2997				break;
2998			}
2999		}
3000	}
3001	return psf;
3002}
3003
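/*
 * Advance to the next source entry, walking through the remaining
 * groups and devices as the current lists run out.
 */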
3004static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
3005{
3006	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3007
3008	psf = rcu_dereference(psf->sf_next);
3009	while (!psf) {
3010		state->im = rcu_dereference(state->im->next);
3011		while (!state->im) {
3012			state->dev = next_net_device_rcu(state->dev);
3013			if (!state->dev) {
3014				state->idev = NULL;
3015				goto out;
3016			}
3017			state->idev = __in6_dev_get(state->dev);
3018			if (!state->idev)
3019				continue;
3020			state->im = rcu_dereference(state->idev->mc_list);
3021		}
3022		psf = rcu_dereference(state->im->mca_sources);
3023	}
3024out:
3025	return psf;
3026}
3027
3028static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
3029{
3030	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
3031	if (psf)
3032		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
3033			--pos;
3034	return pos ? NULL : psf;
3035}
3036
3037static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
3038	__acquires(RCU)
3039{
3040	rcu_read_lock();
3041	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
3042}
3043
3044static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3045{
3046	struct ip6_sf_list *psf;
3047	if (v == SEQ_START_TOKEN)
3048		psf = igmp6_mcf_get_first(seq);
3049	else
3050		psf = igmp6_mcf_get_next(seq, v);
3051	++*pos;
3052	return psf;
3053}
3054
3055static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
3056	__releases(RCU)
3057{
3058	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3059
3060	if (likely(state->im))
3061		state->im = NULL;
3062	if (likely(state->idev))
3063		state->idev = NULL;
3064
3065	state->dev = NULL;
3066	rcu_read_unlock();
3067}
3068
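/*
 * A header line, then one line per (device, group, source) triple with
 * the INCLUDE/EXCLUDE reference counts for that source.
 */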
3069static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
3070{
3071	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
3072	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3073
3074	if (v == SEQ_START_TOKEN) {
3075		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
3076	} else {
3077		seq_printf(seq,
3078			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
3079			   state->dev->ifindex, state->dev->name,
3080			   &state->im->mca_addr,
3081			   &psf->sf_addr,
3082			   psf->sf_count[MCAST_INCLUDE],
3083			   psf->sf_count[MCAST_EXCLUDE]);
3084	}
3085	return 0;
3086}
3087
3088static const struct seq_operations igmp6_mcf_seq_ops = {
3089	.start	=	igmp6_mcf_seq_start,
3090	.next	=	igmp6_mcf_seq_next,
3091	.stop	=	igmp6_mcf_seq_stop,
3092	.show	=	igmp6_mcf_seq_show,
3093};
3094
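/*
 * Create the per-netns "igmp6" and "mcfilter6" proc entries; if the
 * second one fails, remove "igmp6" again before returning -ENOMEM.
 */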
3095static int __net_init igmp6_proc_init(struct net *net)
3096{
3097	int err;
3098
3099	err = -ENOMEM;
3100	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
3101			sizeof(struct igmp6_mc_iter_state)))
3102		goto out;
3103	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
3104			&igmp6_mcf_seq_ops,
3105			sizeof(struct igmp6_mcf_iter_state)))
3106		goto out_proc_net_igmp6;
3107
3108	err = 0;
3109out:
3110	return err;
3111
3112out_proc_net_igmp6:
3113	remove_proc_entry("igmp6", net->proc_net);
3114	goto out;
3115}
3116
3117static void __net_exit igmp6_proc_exit(struct net *net)
3118{
3119	remove_proc_entry("mcfilter6", net->proc_net);
3120	remove_proc_entry("igmp6", net->proc_net);
3121}
3122#else
3123static inline int igmp6_proc_init(struct net *net)
3124{
3125	return 0;
3126}
3127static inline void igmp6_proc_exit(struct net *net)
3128{
3129}
3130#endif
3131
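/*
 * Per-netns setup: an ICMPv6 control socket for sending MLD messages
 * (hop limit 1), a second control socket used for auto-join, and the
 * proc entries above.
 */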
3132static int __net_init igmp6_net_init(struct net *net)
3133{
3134	int err;
3135
3136	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
3137				   SOCK_RAW, IPPROTO_ICMPV6, net);
3138	if (err < 0) {
3139		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
3140		       err);
3141		goto out;
3142	}
3143
3144	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
3145	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;
3146
3147	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
3148				   SOCK_RAW, IPPROTO_ICMPV6, net);
3149	if (err < 0) {
3150		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
3151		       err);
3152		goto out_sock_create;
3153	}
3154
3155	err = igmp6_proc_init(net);
3156	if (err)
3157		goto out_sock_create_autojoin;
3158
3159	return 0;
3160
3161out_sock_create_autojoin:
3162	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3163out_sock_create:
3164	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3165out:
3166	return err;
3167}
3168
3169static void __net_exit igmp6_net_exit(struct net *net)
3170{
3171	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3172	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3173	igmp6_proc_exit(net);
3174}
3175
3176static struct pernet_operations igmp6_net_ops = {
3177	.init = igmp6_net_init,
3178	.exit = igmp6_net_exit,
3179};
3180
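/*
 * Register the pernet operations and create the "mld" workqueue;
 * igmp6_cleanup() tears both down again.
 */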
3181int __init igmp6_init(void)
3182{
3183	int err;
3184
3185	err = register_pernet_subsys(&igmp6_net_ops);
3186	if (err)
3187		return err;
3188
3189	mld_wq = create_workqueue("mld");
3190	if (!mld_wq) {
3191		unregister_pernet_subsys(&igmp6_net_ops);
3192		return -ENOMEM;
3193	}
3194
3195	return err;
3196}
3197
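/*
 * Hook up igmp6_netdev_notifier so NETDEV_RESEND_IGMP events rejoin the
 * device's groups; igmp6_late_cleanup() removes the notifier again.
 */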
3198int __init igmp6_late_init(void)
3199{
3200	return register_netdevice_notifier(&igmp6_netdev_notifier);
3201}
3202
3203void igmp6_cleanup(void)
3204{
3205	unregister_pernet_subsys(&igmp6_net_ops);
3206	destroy_workqueue(mld_wq);
3207}
3208
3209void igmp6_late_cleanup(void)
3210{
3211	unregister_netdevice_notifier(&igmp6_netdev_notifier);
3212}