   1/*
   2 *	Linux IPv6 multicast routing support for BSD pim6sd
   3 *	Based on net/ipv4/ipmr.c.
   4 *
   5 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
   6 *		LSIIT Laboratory, Strasbourg, France
   7 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
   8 *		6WIND, Paris, France
   9 *	Copyright (C)2007,2008 USAGI/WIDE Project
  10 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  11 *
  12 *	This program is free software; you can redistribute it and/or
  13 *	modify it under the terms of the GNU General Public License
  14 *	as published by the Free Software Foundation; either version
  15 *	2 of the License, or (at your option) any later version.
  16 *
  17 */
  18
  19#include <asm/uaccess.h>
  20#include <linux/types.h>
  21#include <linux/sched.h>
  22#include <linux/errno.h>
  23#include <linux/timer.h>
  24#include <linux/mm.h>
  25#include <linux/kernel.h>
  26#include <linux/fcntl.h>
  27#include <linux/stat.h>
  28#include <linux/socket.h>
  29#include <linux/inet.h>
  30#include <linux/netdevice.h>
  31#include <linux/inetdevice.h>
  32#include <linux/proc_fs.h>
  33#include <linux/seq_file.h>
  34#include <linux/init.h>
  35#include <linux/slab.h>
  36#include <linux/compat.h>
  37#include <net/protocol.h>
  38#include <linux/skbuff.h>
  39#include <net/sock.h>
  40#include <net/raw.h>
  41#include <linux/notifier.h>
  42#include <linux/if_arp.h>
  43#include <net/checksum.h>
  44#include <net/netlink.h>
  45#include <net/fib_rules.h>
  46
  47#include <net/ipv6.h>
  48#include <net/ip6_route.h>
  49#include <linux/mroute6.h>
  50#include <linux/pim.h>
  51#include <net/addrconf.h>
  52#include <linux/netfilter_ipv6.h>
  53#include <linux/export.h>
  54#include <net/ip6_checksum.h>
  55#include <linux/netconf.h>
  56
  57struct mr6_table {
  58	struct list_head	list;
  59	possible_net_t		net;
  60	u32			id;
  61	struct sock		*mroute6_sk;
  62	struct timer_list	ipmr_expire_timer;
  63	struct list_head	mfc6_unres_queue;
  64	struct list_head	mfc6_cache_array[MFC6_LINES];
  65	struct mif_device	vif6_table[MAXMIFS];
  66	int			maxvif;
  67	atomic_t		cache_resolve_queue_len;
  68	bool			mroute_do_assert;
  69	bool			mroute_do_pim;
  70#ifdef CONFIG_IPV6_PIMSM_V2
  71	int			mroute_reg_vif_num;
  72#endif
  73};
  74
  75struct ip6mr_rule {
  76	struct fib_rule		common;
  77};
  78
  79struct ip6mr_result {
  80	struct mr6_table	*mrt;
  81};
  82
   83/* Big lock, protecting the vif table, mrt cache and mroute socket state.
   84   Note that changes are serialized via rtnl_lock.
   85 */
  86
  87static DEFINE_RWLOCK(mrt_lock);
  88
  89/*
  90 *	Multicast router control variables
  91 */
  92
  93#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
  94
  95/* Special spinlock for queue of unresolved entries */
  96static DEFINE_SPINLOCK(mfc_unres_lock);
  97
   98/* We return to Alan's original scheme. The hash table of resolved
   99   entries is changed only in process context and protected
  100   with the weak lock mrt_lock. The queue of unresolved entries is
  101   protected with the strong spinlock mfc_unres_lock.
  102
  103   This way the data path is entirely free of exclusive locks.
  104 */
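/*
 * Illustration (not part of the original file) of the pattern the
 * comment above implies: the data path only ever takes mrt_lock for
 * reading, while process-context updates take it for writing with
 * bottom halves disabled:
 *
 *	read_lock(&mrt_lock);		lookup and forward ...
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);	vif/cache table updates ...
 *	write_unlock_bh(&mrt_lock);
 */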
 105
 106static struct kmem_cache *mrt_cachep __read_mostly;
 107
 108static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
 109static void ip6mr_free_table(struct mr6_table *mrt);
 110
 111static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 112			   struct sk_buff *skb, struct mfc6_cache *cache);
 113static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
 114			      mifi_t mifi, int assert);
 115static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 116			       struct mfc6_cache *c, struct rtmsg *rtm);
 117static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 118			      int cmd);
 119static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 120			       struct netlink_callback *cb);
 121static void mroute_clean_tables(struct mr6_table *mrt, bool all);
 122static void ipmr_expire_process(unsigned long arg);
 123
 124#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 125#define ip6mr_for_each_table(mrt, net) \
 126	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 127
 128static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 129{
 130	struct mr6_table *mrt;
 131
 132	ip6mr_for_each_table(mrt, net) {
 133		if (mrt->id == id)
 134			return mrt;
 135	}
 136	return NULL;
 137}
 138
 139static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 140			    struct mr6_table **mrt)
 141{
 142	int err;
 143	struct ip6mr_result res;
 144	struct fib_lookup_arg arg = {
 145		.result = &res,
 146		.flags = FIB_LOOKUP_NOREF,
 147	};
 148
 149	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 150			       flowi6_to_flowi(flp6), 0, &arg);
 151	if (err < 0)
 152		return err;
 153	*mrt = res.mrt;
 154	return 0;
 155}
 156
 157static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 158			     int flags, struct fib_lookup_arg *arg)
 159{
 160	struct ip6mr_result *res = arg->result;
 161	struct mr6_table *mrt;
 162
 163	switch (rule->action) {
 164	case FR_ACT_TO_TBL:
 165		break;
 166	case FR_ACT_UNREACHABLE:
 167		return -ENETUNREACH;
 168	case FR_ACT_PROHIBIT:
 169		return -EACCES;
 170	case FR_ACT_BLACKHOLE:
 171	default:
 172		return -EINVAL;
 173	}
 174
 175	mrt = ip6mr_get_table(rule->fr_net, rule->table);
 176	if (!mrt)
 177		return -EAGAIN;
 178	res->mrt = mrt;
 179	return 0;
 180}
 181
 182static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
 183{
 184	return 1;
 185}
 186
 187static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
 188	FRA_GENERIC_POLICY,
 189};
 190
 191static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 192				struct fib_rule_hdr *frh, struct nlattr **tb)
 193{
 194	return 0;
 195}
 196
 197static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 198			      struct nlattr **tb)
 199{
 200	return 1;
 201}
 202
 203static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 204			   struct fib_rule_hdr *frh)
 205{
 206	frh->dst_len = 0;
 207	frh->src_len = 0;
 208	frh->tos     = 0;
 209	return 0;
 210}
 211
 212static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
 213	.family		= RTNL_FAMILY_IP6MR,
 214	.rule_size	= sizeof(struct ip6mr_rule),
 215	.addr_size	= sizeof(struct in6_addr),
 216	.action		= ip6mr_rule_action,
 217	.match		= ip6mr_rule_match,
 218	.configure	= ip6mr_rule_configure,
 219	.compare	= ip6mr_rule_compare,
 220	.fill		= ip6mr_rule_fill,
 221	.nlgroup	= RTNLGRP_IPV6_RULE,
 222	.policy		= ip6mr_rule_policy,
 223	.owner		= THIS_MODULE,
 224};
 225
 226static int __net_init ip6mr_rules_init(struct net *net)
 227{
 228	struct fib_rules_ops *ops;
 229	struct mr6_table *mrt;
 230	int err;
 231
 232	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
 233	if (IS_ERR(ops))
 234		return PTR_ERR(ops);
 235
 236	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 237
 238	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 239	if (!mrt) {
 240		err = -ENOMEM;
 241		goto err1;
 242	}
 243
 244	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
 245	if (err < 0)
 246		goto err2;
 247
 248	net->ipv6.mr6_rules_ops = ops;
 249	return 0;
 250
 251err2:
  252	ip6mr_free_table(mrt);
 253err1:
 254	fib_rules_unregister(ops);
 255	return err;
 256}
 257
 258static void __net_exit ip6mr_rules_exit(struct net *net)
 259{
 260	struct mr6_table *mrt, *next;
 261
 262	rtnl_lock();
 263	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 264		list_del(&mrt->list);
 265		ip6mr_free_table(mrt);
 266	}
 267	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 268	rtnl_unlock();
 269}
 270#else
 271#define ip6mr_for_each_table(mrt, net) \
 272	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 273
 274static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 275{
 276	return net->ipv6.mrt6;
 277}
 278
 279static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 280			    struct mr6_table **mrt)
 281{
 282	*mrt = net->ipv6.mrt6;
 283	return 0;
 284}
 285
 286static int __net_init ip6mr_rules_init(struct net *net)
 287{
 288	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
 289	return net->ipv6.mrt6 ? 0 : -ENOMEM;
 290}
 291
 292static void __net_exit ip6mr_rules_exit(struct net *net)
 293{
 294	rtnl_lock();
 295	ip6mr_free_table(net->ipv6.mrt6);
 296	net->ipv6.mrt6 = NULL;
 297	rtnl_unlock();
 298}
 299#endif
 300
 301static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 302{
 303	struct mr6_table *mrt;
 304	unsigned int i;
 305
 306	mrt = ip6mr_get_table(net, id);
 307	if (mrt)
 308		return mrt;
 309
 310	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 311	if (!mrt)
 312		return NULL;
 313	mrt->id = id;
 314	write_pnet(&mrt->net, net);
 315
 316	/* Forwarding cache */
 317	for (i = 0; i < MFC6_LINES; i++)
 318		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
  319
  320	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 321
 322	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
 323		    (unsigned long)mrt);
 324
 325#ifdef CONFIG_IPV6_PIMSM_V2
 326	mrt->mroute_reg_vif_num = -1;
 327#endif
 328#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 329	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
 330#endif
 331	return mrt;
 332}
 333
 334static void ip6mr_free_table(struct mr6_table *mrt)
 335{
 336	del_timer_sync(&mrt->ipmr_expire_timer);
 337	mroute_clean_tables(mrt, true);
 338	kfree(mrt);
 339}
 340
 341#ifdef CONFIG_PROC_FS
 342
 343struct ipmr_mfc_iter {
 344	struct seq_net_private p;
 345	struct mr6_table *mrt;
 346	struct list_head *cache;
 347	int ct;
 348};
 349
 350
 351static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 352					   struct ipmr_mfc_iter *it, loff_t pos)
 353{
 354	struct mr6_table *mrt = it->mrt;
 355	struct mfc6_cache *mfc;
 356
 357	read_lock(&mrt_lock);
 358	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
 359		it->cache = &mrt->mfc6_cache_array[it->ct];
 360		list_for_each_entry(mfc, it->cache, list)
 361			if (pos-- == 0)
 362				return mfc;
 363	}
 364	read_unlock(&mrt_lock);
 365
 366	spin_lock_bh(&mfc_unres_lock);
 367	it->cache = &mrt->mfc6_unres_queue;
 368	list_for_each_entry(mfc, it->cache, list)
 369		if (pos-- == 0)
 370			return mfc;
 371	spin_unlock_bh(&mfc_unres_lock);
 372
 373	it->cache = NULL;
 374	return NULL;
 375}
 376
  377/*
  378 *	The /proc interfaces to multicast routing: /proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
  379 */
 380
 381struct ipmr_vif_iter {
 382	struct seq_net_private p;
 383	struct mr6_table *mrt;
 384	int ct;
 385};
 386
 387static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
 388					    struct ipmr_vif_iter *iter,
 389					    loff_t pos)
 390{
 391	struct mr6_table *mrt = iter->mrt;
 392
 393	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
 394		if (!MIF_EXISTS(mrt, iter->ct))
 395			continue;
 396		if (pos-- == 0)
 397			return &mrt->vif6_table[iter->ct];
 398	}
 399	return NULL;
 400}
 401
 402static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 403	__acquires(mrt_lock)
 404{
 405	struct ipmr_vif_iter *iter = seq->private;
 406	struct net *net = seq_file_net(seq);
 407	struct mr6_table *mrt;
 408
 409	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 410	if (!mrt)
  411		return ERR_PTR(-ENOENT);
 412
 413	iter->mrt = mrt;
 414
 415	read_lock(&mrt_lock);
 416	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
 417		: SEQ_START_TOKEN;
 418}
 419
 420static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 421{
 422	struct ipmr_vif_iter *iter = seq->private;
 423	struct net *net = seq_file_net(seq);
 424	struct mr6_table *mrt = iter->mrt;
 425
 426	++*pos;
 427	if (v == SEQ_START_TOKEN)
 428		return ip6mr_vif_seq_idx(net, iter, 0);
 429
 430	while (++iter->ct < mrt->maxvif) {
 431		if (!MIF_EXISTS(mrt, iter->ct))
 432			continue;
 433		return &mrt->vif6_table[iter->ct];
 434	}
 435	return NULL;
 436}
 437
 438static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 439	__releases(mrt_lock)
 440{
 441	read_unlock(&mrt_lock);
 442}
 443
 444static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 445{
 446	struct ipmr_vif_iter *iter = seq->private;
 447	struct mr6_table *mrt = iter->mrt;
 448
 449	if (v == SEQ_START_TOKEN) {
 450		seq_puts(seq,
 451			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
 452	} else {
 453		const struct mif_device *vif = v;
 454		const char *name = vif->dev ? vif->dev->name : "none";
 455
 456		seq_printf(seq,
 457			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
 458			   vif - mrt->vif6_table,
 459			   name, vif->bytes_in, vif->pkt_in,
 460			   vif->bytes_out, vif->pkt_out,
 461			   vif->flags);
 462	}
 463	return 0;
 464}
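/*
 * For reference (not in the original source): with the format string
 * above, /proc/net/ip6_mr_vif output looks roughly like this
 * (counter values invented):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0          12345      67     89012      34 00000
 *	 1 pim6reg           0       0         0       0 00001
 *
 * Flags is the MIFF_* bitmask in hex; 0x1 is MIFF_REGISTER.
 */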
 465
 466static const struct seq_operations ip6mr_vif_seq_ops = {
 467	.start = ip6mr_vif_seq_start,
 468	.next  = ip6mr_vif_seq_next,
 469	.stop  = ip6mr_vif_seq_stop,
 470	.show  = ip6mr_vif_seq_show,
 471};
 472
 473static int ip6mr_vif_open(struct inode *inode, struct file *file)
 474{
 475	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
 476			    sizeof(struct ipmr_vif_iter));
 477}
 478
 479static const struct file_operations ip6mr_vif_fops = {
 480	.owner	 = THIS_MODULE,
 481	.open    = ip6mr_vif_open,
 482	.read    = seq_read,
 483	.llseek  = seq_lseek,
 484	.release = seq_release_net,
 485};
 486
 487static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 488{
 489	struct ipmr_mfc_iter *it = seq->private;
 490	struct net *net = seq_file_net(seq);
 491	struct mr6_table *mrt;
 492
 493	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 494	if (!mrt)
 495		return ERR_PTR(-ENOENT);
 496
 497	it->mrt = mrt;
 498	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
 499		: SEQ_START_TOKEN;
 500}
 501
 502static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 503{
 504	struct mfc6_cache *mfc = v;
 505	struct ipmr_mfc_iter *it = seq->private;
 506	struct net *net = seq_file_net(seq);
 507	struct mr6_table *mrt = it->mrt;
 508
 509	++*pos;
 510
 511	if (v == SEQ_START_TOKEN)
 512		return ipmr_mfc_seq_idx(net, seq->private, 0);
 513
 514	if (mfc->list.next != it->cache)
 515		return list_entry(mfc->list.next, struct mfc6_cache, list);
 516
 517	if (it->cache == &mrt->mfc6_unres_queue)
 518		goto end_of_list;
 519
 520	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
 521
 522	while (++it->ct < MFC6_LINES) {
 523		it->cache = &mrt->mfc6_cache_array[it->ct];
 524		if (list_empty(it->cache))
 525			continue;
 526		return list_first_entry(it->cache, struct mfc6_cache, list);
 527	}
 528
 529	/* exhausted cache_array, show unresolved */
 530	read_unlock(&mrt_lock);
 531	it->cache = &mrt->mfc6_unres_queue;
 532	it->ct = 0;
 533
 534	spin_lock_bh(&mfc_unres_lock);
 535	if (!list_empty(it->cache))
 536		return list_first_entry(it->cache, struct mfc6_cache, list);
 537
 538 end_of_list:
 539	spin_unlock_bh(&mfc_unres_lock);
 540	it->cache = NULL;
 541
 542	return NULL;
 543}
 544
 545static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 546{
 547	struct ipmr_mfc_iter *it = seq->private;
 548	struct mr6_table *mrt = it->mrt;
 549
 550	if (it->cache == &mrt->mfc6_unres_queue)
 551		spin_unlock_bh(&mfc_unres_lock);
 552	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
 553		read_unlock(&mrt_lock);
 554}
 555
 556static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 557{
 558	int n;
 559
 560	if (v == SEQ_START_TOKEN) {
 561		seq_puts(seq,
 562			 "Group                            "
 563			 "Origin                           "
 564			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
 565	} else {
 566		const struct mfc6_cache *mfc = v;
 567		const struct ipmr_mfc_iter *it = seq->private;
 568		struct mr6_table *mrt = it->mrt;
 569
 570		seq_printf(seq, "%pI6 %pI6 %-3hd",
 571			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 572			   mfc->mf6c_parent);
 573
 574		if (it->cache != &mrt->mfc6_unres_queue) {
 575			seq_printf(seq, " %8lu %8lu %8lu",
 576				   mfc->mfc_un.res.pkt,
 577				   mfc->mfc_un.res.bytes,
 578				   mfc->mfc_un.res.wrong_if);
 579			for (n = mfc->mfc_un.res.minvif;
 580			     n < mfc->mfc_un.res.maxvif; n++) {
 581				if (MIF_EXISTS(mrt, n) &&
 582				    mfc->mfc_un.res.ttls[n] < 255)
 583					seq_printf(seq,
 584						   " %2d:%-3d",
 585						   n, mfc->mfc_un.res.ttls[n]);
 586			}
 587		} else {
 588			/* unresolved mfc_caches don't contain
 589			 * pkt, bytes and wrong_if values
 590			 */
 591			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
 592		}
 593		seq_putc(seq, '\n');
 594	}
 595	return 0;
 596}
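/*
 * For reference (not in the original source): with the format strings
 * above, a resolved /proc/net/ip6_mr_cache entry looks roughly like
 * (values invented):
 *
 *	Group                            Origin                           Iif      Pkts  Bytes     Wrong  Oifs
 *	ff0e::1                          2001:db8::1                       1        10     1040         0  2:1
 *
 * Each Oifs pair is "mif:ttl-threshold". Unresolved entries print an
 * Iif of -1 and zeroed counters.
 */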
 597
 598static const struct seq_operations ipmr_mfc_seq_ops = {
 599	.start = ipmr_mfc_seq_start,
 600	.next  = ipmr_mfc_seq_next,
 601	.stop  = ipmr_mfc_seq_stop,
 602	.show  = ipmr_mfc_seq_show,
 603};
 604
 605static int ipmr_mfc_open(struct inode *inode, struct file *file)
 606{
 607	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
 608			    sizeof(struct ipmr_mfc_iter));
 609}
 610
 611static const struct file_operations ip6mr_mfc_fops = {
 612	.owner	 = THIS_MODULE,
 613	.open    = ipmr_mfc_open,
 614	.read    = seq_read,
 615	.llseek  = seq_lseek,
 616	.release = seq_release_net,
 617};
 618#endif
 619
 620#ifdef CONFIG_IPV6_PIMSM_V2
 621
 622static int pim6_rcv(struct sk_buff *skb)
 623{
 624	struct pimreghdr *pim;
 625	struct ipv6hdr   *encap;
 626	struct net_device  *reg_dev = NULL;
 627	struct net *net = dev_net(skb->dev);
 628	struct mr6_table *mrt;
 629	struct flowi6 fl6 = {
 630		.flowi6_iif	= skb->dev->ifindex,
 631		.flowi6_mark	= skb->mark,
 632	};
 633	int reg_vif_num;
 634
 635	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 636		goto drop;
 637
 638	pim = (struct pimreghdr *)skb_transport_header(skb);
 639	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
 640	    (pim->flags & PIM_NULL_REGISTER) ||
 641	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 642			     sizeof(*pim), IPPROTO_PIM,
 643			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 644	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 645		goto drop;
 646
  647	/* check if the inner packet is destined to a multicast group */
 648	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
 649				   sizeof(*pim));
 650
 651	if (!ipv6_addr_is_multicast(&encap->daddr) ||
 652	    encap->payload_len == 0 ||
 653	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
 654		goto drop;
 655
 656	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 657		goto drop;
 658	reg_vif_num = mrt->mroute_reg_vif_num;
 659
 660	read_lock(&mrt_lock);
 661	if (reg_vif_num >= 0)
 662		reg_dev = mrt->vif6_table[reg_vif_num].dev;
 663	if (reg_dev)
 664		dev_hold(reg_dev);
 665	read_unlock(&mrt_lock);
 666
 667	if (!reg_dev)
 668		goto drop;
 669
 670	skb->mac_header = skb->network_header;
 671	skb_pull(skb, (u8 *)encap - skb->data);
 672	skb_reset_network_header(skb);
 673	skb->protocol = htons(ETH_P_IPV6);
 674	skb->ip_summed = CHECKSUM_NONE;
 675
 676	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
 677
 678	netif_rx(skb);
 679
 680	dev_put(reg_dev);
 681	return 0;
 682 drop:
 683	kfree_skb(skb);
 684	return 0;
 685}
 686
 687static const struct inet6_protocol pim6_protocol = {
 688	.handler	=	pim6_rcv,
 689};
 690
 691/* Service routines creating virtual interfaces: PIMREG */
 692
 693static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 694				      struct net_device *dev)
 695{
 696	struct net *net = dev_net(dev);
 697	struct mr6_table *mrt;
 698	struct flowi6 fl6 = {
 699		.flowi6_oif	= dev->ifindex,
 700		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 701		.flowi6_mark	= skb->mark,
 702	};
 703	int err;
 704
 705	err = ip6mr_fib_lookup(net, &fl6, &mrt);
 706	if (err < 0) {
 707		kfree_skb(skb);
 708		return err;
 709	}
 710
 711	read_lock(&mrt_lock);
 712	dev->stats.tx_bytes += skb->len;
 713	dev->stats.tx_packets++;
 714	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
 715	read_unlock(&mrt_lock);
 716	kfree_skb(skb);
 717	return NETDEV_TX_OK;
 718}
 719
 720static int reg_vif_get_iflink(const struct net_device *dev)
 721{
 722	return 0;
 723}
 724
 725static const struct net_device_ops reg_vif_netdev_ops = {
 726	.ndo_start_xmit	= reg_vif_xmit,
 727	.ndo_get_iflink = reg_vif_get_iflink,
 728};
 729
 730static void reg_vif_setup(struct net_device *dev)
 731{
 732	dev->type		= ARPHRD_PIMREG;
 733	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 734	dev->flags		= IFF_NOARP;
 735	dev->netdev_ops		= &reg_vif_netdev_ops;
 736	dev->destructor		= free_netdev;
 737	dev->features		|= NETIF_F_NETNS_LOCAL;
 738}
 739
 740static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 741{
 742	struct net_device *dev;
 743	char name[IFNAMSIZ];
 744
 745	if (mrt->id == RT6_TABLE_DFLT)
 746		sprintf(name, "pim6reg");
 747	else
 748		sprintf(name, "pim6reg%u", mrt->id);
 749
 750	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 751	if (!dev)
 752		return NULL;
 753
 754	dev_net_set(dev, net);
 755
 756	if (register_netdevice(dev)) {
 757		free_netdev(dev);
 758		return NULL;
 759	}
 760
 761	if (dev_open(dev))
 762		goto failure;
 763
 764	dev_hold(dev);
 765	return dev;
 766
 767failure:
 768	unregister_netdevice(dev);
 769	return NULL;
 770}
 771#endif
 772
 773/*
 774 *	Delete a VIF entry
 775 */
 776
 777static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 778{
 779	struct mif_device *v;
 780	struct net_device *dev;
 781	struct inet6_dev *in6_dev;
 782
 783	if (vifi < 0 || vifi >= mrt->maxvif)
 784		return -EADDRNOTAVAIL;
 785
 786	v = &mrt->vif6_table[vifi];
 787
 788	write_lock_bh(&mrt_lock);
 789	dev = v->dev;
 790	v->dev = NULL;
 791
 792	if (!dev) {
 793		write_unlock_bh(&mrt_lock);
 794		return -EADDRNOTAVAIL;
 795	}
 796
 797#ifdef CONFIG_IPV6_PIMSM_V2
 798	if (vifi == mrt->mroute_reg_vif_num)
 799		mrt->mroute_reg_vif_num = -1;
 800#endif
 801
 802	if (vifi + 1 == mrt->maxvif) {
 803		int tmp;
 804		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 805			if (MIF_EXISTS(mrt, tmp))
 806				break;
 807		}
 808		mrt->maxvif = tmp + 1;
 809	}
 810
 811	write_unlock_bh(&mrt_lock);
 812
 813	dev_set_allmulti(dev, -1);
 814
 815	in6_dev = __in6_dev_get(dev);
 816	if (in6_dev) {
 817		in6_dev->cnf.mc_forwarding--;
 818		inet6_netconf_notify_devconf(dev_net(dev),
 819					     NETCONFA_MC_FORWARDING,
 820					     dev->ifindex, &in6_dev->cnf);
 821	}
 822
 823	if (v->flags & MIFF_REGISTER)
 824		unregister_netdevice_queue(dev, head);
 825
 826	dev_put(dev);
 827	return 0;
 828}
 829
 830static inline void ip6mr_cache_free(struct mfc6_cache *c)
 831{
 832	kmem_cache_free(mrt_cachep, c);
 833}
 834
 835/* Destroy an unresolved cache entry, killing queued skbs
 836   and reporting error to netlink readers.
 837 */
 838
 839static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
 840{
 841	struct net *net = read_pnet(&mrt->net);
 842	struct sk_buff *skb;
 843
 844	atomic_dec(&mrt->cache_resolve_queue_len);
 845
 846	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 847		if (ipv6_hdr(skb)->version == 0) {
 848			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 849			nlh->nlmsg_type = NLMSG_ERROR;
 850			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
 851			skb_trim(skb, nlh->nlmsg_len);
 852			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
 853			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 854		} else
 855			kfree_skb(skb);
 856	}
 857
 858	ip6mr_cache_free(c);
 859}
 860
 861
 862/* Timer process for all the unresolved queue. */
 863
 864static void ipmr_do_expire_process(struct mr6_table *mrt)
 865{
 866	unsigned long now = jiffies;
 867	unsigned long expires = 10 * HZ;
 868	struct mfc6_cache *c, *next;
 869
 870	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
 871		if (time_after(c->mfc_un.unres.expires, now)) {
 872			/* not yet... */
 873			unsigned long interval = c->mfc_un.unres.expires - now;
 874			if (interval < expires)
 875				expires = interval;
 876			continue;
 877		}
 878
 879		list_del(&c->list);
 880		mr6_netlink_event(mrt, c, RTM_DELROUTE);
 881		ip6mr_destroy_unres(mrt, c);
 882	}
 883
 884	if (!list_empty(&mrt->mfc6_unres_queue))
 885		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 886}
 887
 888static void ipmr_expire_process(unsigned long arg)
 889{
 890	struct mr6_table *mrt = (struct mr6_table *)arg;
 891
 892	if (!spin_trylock(&mfc_unres_lock)) {
 893		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 894		return;
 895	}
 896
 897	if (!list_empty(&mrt->mfc6_unres_queue))
 898		ipmr_do_expire_process(mrt);
 899
 900	spin_unlock(&mfc_unres_lock);
 901}
 902
 903/* Fill oifs list. It is called under write locked mrt_lock. */
 904
 905static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
 906				    unsigned char *ttls)
 907{
 908	int vifi;
 909
 910	cache->mfc_un.res.minvif = MAXMIFS;
 911	cache->mfc_un.res.maxvif = 0;
 912	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 913
 914	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
 915		if (MIF_EXISTS(mrt, vifi) &&
 916		    ttls[vifi] && ttls[vifi] < 255) {
 917			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 918			if (cache->mfc_un.res.minvif > vifi)
 919				cache->mfc_un.res.minvif = vifi;
 920			if (cache->mfc_un.res.maxvif <= vifi)
 921				cache->mfc_un.res.maxvif = vifi + 1;
 922		}
 923	}
 924}
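/*
 * Worked example (not in the original source): with maxvif == 4,
 * mifs 1 and 3 registered, and ttls == {255, 1, 255, 3}, the loop
 * above keeps thresholds for mifs 1 and 3 only, leaving
 * res.minvif == 1 and res.maxvif == 4 (one past the last mif that
 * may forward).
 */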
 925
 926static int mif6_add(struct net *net, struct mr6_table *mrt,
 927		    struct mif6ctl *vifc, int mrtsock)
 928{
 929	int vifi = vifc->mif6c_mifi;
 930	struct mif_device *v = &mrt->vif6_table[vifi];
 931	struct net_device *dev;
 932	struct inet6_dev *in6_dev;
 933	int err;
 934
 935	/* Is vif busy ? */
 936	if (MIF_EXISTS(mrt, vifi))
 937		return -EADDRINUSE;
 938
 939	switch (vifc->mif6c_flags) {
 940#ifdef CONFIG_IPV6_PIMSM_V2
 941	case MIFF_REGISTER:
 942		/*
 943		 * Special Purpose VIF in PIM
 944		 * All the packets will be sent to the daemon
 945		 */
 946		if (mrt->mroute_reg_vif_num >= 0)
 947			return -EADDRINUSE;
 948		dev = ip6mr_reg_vif(net, mrt);
 949		if (!dev)
 950			return -ENOBUFS;
 951		err = dev_set_allmulti(dev, 1);
 952		if (err) {
 953			unregister_netdevice(dev);
 954			dev_put(dev);
 955			return err;
 956		}
 957		break;
 958#endif
 959	case 0:
 960		dev = dev_get_by_index(net, vifc->mif6c_pifi);
 961		if (!dev)
 962			return -EADDRNOTAVAIL;
 963		err = dev_set_allmulti(dev, 1);
 964		if (err) {
 965			dev_put(dev);
 966			return err;
 967		}
 968		break;
 969	default:
 970		return -EINVAL;
 971	}
 972
 973	in6_dev = __in6_dev_get(dev);
 974	if (in6_dev) {
 975		in6_dev->cnf.mc_forwarding++;
 976		inet6_netconf_notify_devconf(dev_net(dev),
 977					     NETCONFA_MC_FORWARDING,
 978					     dev->ifindex, &in6_dev->cnf);
 979	}
 980
 981	/*
 982	 *	Fill in the VIF structures
 983	 */
 984	v->rate_limit = vifc->vifc_rate_limit;
 985	v->flags = vifc->mif6c_flags;
 986	if (!mrtsock)
 987		v->flags |= VIFF_STATIC;
 988	v->threshold = vifc->vifc_threshold;
 989	v->bytes_in = 0;
 990	v->bytes_out = 0;
 991	v->pkt_in = 0;
 992	v->pkt_out = 0;
 993	v->link = dev->ifindex;
 994	if (v->flags & MIFF_REGISTER)
 995		v->link = dev_get_iflink(dev);
 996
 997	/* And finish update writing critical data */
 998	write_lock_bh(&mrt_lock);
 999	v->dev = dev;
1000#ifdef CONFIG_IPV6_PIMSM_V2
1001	if (v->flags & MIFF_REGISTER)
1002		mrt->mroute_reg_vif_num = vifi;
1003#endif
1004	if (vifi + 1 > mrt->maxvif)
1005		mrt->maxvif = vifi + 1;
1006	write_unlock_bh(&mrt_lock);
1007	return 0;
1008}
1009
1010static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1011					   const struct in6_addr *origin,
1012					   const struct in6_addr *mcastgrp)
1013{
1014	int line = MFC6_HASH(mcastgrp, origin);
1015	struct mfc6_cache *c;
1016
1017	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1018		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1019		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1020			return c;
1021	}
1022	return NULL;
1023}
1024
1025/* Look for a (*,*,oif) entry */
1026static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1027						      mifi_t mifi)
1028{
1029	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1030	struct mfc6_cache *c;
1031
1032	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1033		if (ipv6_addr_any(&c->mf6c_origin) &&
1034		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
1035		    (c->mfc_un.res.ttls[mifi] < 255))
1036			return c;
1037
1038	return NULL;
1039}
1040
1041/* Look for a (*,G) entry */
1042static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1043					       struct in6_addr *mcastgrp,
1044					       mifi_t mifi)
1045{
1046	int line = MFC6_HASH(mcastgrp, &in6addr_any);
1047	struct mfc6_cache *c, *proxy;
1048
1049	if (ipv6_addr_any(mcastgrp))
1050		goto skip;
1051
1052	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1053		if (ipv6_addr_any(&c->mf6c_origin) &&
1054		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1055			if (c->mfc_un.res.ttls[mifi] < 255)
1056				return c;
1057
1058			/* It's ok if the mifi is part of the static tree */
1059			proxy = ip6mr_cache_find_any_parent(mrt,
1060							    c->mf6c_parent);
1061			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1062				return c;
1063		}
1064
1065skip:
1066	return ip6mr_cache_find_any_parent(mrt, mifi);
1067}
1068
1069/*
1070 *	Allocate a multicast cache entry
1071 */
1072static struct mfc6_cache *ip6mr_cache_alloc(void)
1073{
1074	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1075	if (!c)
1076		return NULL;
1077	c->mfc_un.res.minvif = MAXMIFS;
1078	return c;
1079}
1080
1081static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1082{
1083	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1084	if (!c)
1085		return NULL;
1086	skb_queue_head_init(&c->mfc_un.unres.unresolved);
1087	c->mfc_un.unres.expires = jiffies + 10 * HZ;
1088	return c;
1089}
1090
1091/*
1092 *	A cache entry has gone into a resolved state from queued
1093 */
1094
1095static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1096				struct mfc6_cache *uc, struct mfc6_cache *c)
1097{
1098	struct sk_buff *skb;
1099
1100	/*
1101	 *	Play the pending entries through our router
1102	 */
1103
1104	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1105		if (ipv6_hdr(skb)->version == 0) {
1106			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1107
1108			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1109				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1110			} else {
1111				nlh->nlmsg_type = NLMSG_ERROR;
1112				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1113				skb_trim(skb, nlh->nlmsg_len);
1114				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1115			}
1116			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1117		} else
1118			ip6_mr_forward(net, mrt, skb, c);
1119	}
1120}
1121
1122/*
1123 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1124 *	expects the following bizarre scheme.
1125 *
1126 *	Called under mrt_lock.
1127 */
1128
1129static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1130			      mifi_t mifi, int assert)
1131{
1132	struct sk_buff *skb;
1133	struct mrt6msg *msg;
1134	int ret;
1135
1136#ifdef CONFIG_IPV6_PIMSM_V2
1137	if (assert == MRT6MSG_WHOLEPKT)
1138		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1139						+sizeof(*msg));
1140	else
1141#endif
1142		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1143
1144	if (!skb)
1145		return -ENOBUFS;
1146
1147	/* I suppose that internal messages
1148	 * do not require checksums */
1149
1150	skb->ip_summed = CHECKSUM_UNNECESSARY;
1151
1152#ifdef CONFIG_IPV6_PIMSM_V2
1153	if (assert == MRT6MSG_WHOLEPKT) {
1154		/* Ugly, but we have no choice with this interface.
1155		   Duplicate old header, fix length etc.
1156		   And all this only to mangle msg->im6_msgtype and
1157		   to set msg->im6_mbz to "mbz" :-)
1158		 */
1159		skb_push(skb, -skb_network_offset(pkt));
1160
1161		skb_push(skb, sizeof(*msg));
1162		skb_reset_transport_header(skb);
1163		msg = (struct mrt6msg *)skb_transport_header(skb);
1164		msg->im6_mbz = 0;
1165		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1166		msg->im6_mif = mrt->mroute_reg_vif_num;
1167		msg->im6_pad = 0;
1168		msg->im6_src = ipv6_hdr(pkt)->saddr;
1169		msg->im6_dst = ipv6_hdr(pkt)->daddr;
1170
1171		skb->ip_summed = CHECKSUM_UNNECESSARY;
1172	} else
1173#endif
1174	{
1175	/*
1176	 *	Copy the IP header
1177	 */
1178
1179	skb_put(skb, sizeof(struct ipv6hdr));
1180	skb_reset_network_header(skb);
1181	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1182
1183	/*
1184	 *	Add our header
1185	 */
1186	skb_put(skb, sizeof(*msg));
1187	skb_reset_transport_header(skb);
1188	msg = (struct mrt6msg *)skb_transport_header(skb);
1189
1190	msg->im6_mbz = 0;
1191	msg->im6_msgtype = assert;
1192	msg->im6_mif = mifi;
1193	msg->im6_pad = 0;
1194	msg->im6_src = ipv6_hdr(pkt)->saddr;
1195	msg->im6_dst = ipv6_hdr(pkt)->daddr;
1196
1197	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1198	skb->ip_summed = CHECKSUM_UNNECESSARY;
1199	}
1200
1201	if (!mrt->mroute6_sk) {
1202		kfree_skb(skb);
1203		return -EINVAL;
1204	}
1205
1206	/*
1207	 *	Deliver to user space multicast routing algorithms
1208	 */
1209	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1210	if (ret < 0) {
1211		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1212		kfree_skb(skb);
1213	}
1214
1215	return ret;
1216}
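/*
 * A minimal userspace sketch (not part of this file) of the consumer
 * side that a daemon like pim6sd implements. Per the code above, a
 * NOCACHE/WRONGMIF report carries a copy of the offending IPv6 header
 * followed by the struct mrt6msg (a WHOLEPKT report instead starts
 * with the mrt6msg). Error handling and kernel/libc header clashes
 * are glossed over; "mroute_sock" is the socket MRT6_INIT was done on.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <linux/mroute6.h>

static void handle_mrt6_report(int mroute_sock)
{
	unsigned char buf[8192];
	struct mrt6msg msg;
	ssize_t len = read(mroute_sock, buf, sizeof(buf));

	if (len < (ssize_t)(40 + sizeof(msg)))	/* IPv6 header + report */
		return;
	memcpy(&msg, buf + 40, sizeof(msg));	/* skip the header copy */
	if (msg.im6_mbz != 0)			/* not a kernel report */
		return;
	if (msg.im6_msgtype == MRT6MSG_NOCACHE) {
		/* resolve (im6_src, im6_dst), then install MRT6_ADD_MFC */
	}
}
#endif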
1217
 1218/*
 1219 *	Queue a packet for resolution on a locked unresolved cache entry.
 1220 */
1221
1222static int
1223ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1224{
1225	bool found = false;
1226	int err;
1227	struct mfc6_cache *c;
1228
1229	spin_lock_bh(&mfc_unres_lock);
1230	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1231		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1232		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1233			found = true;
1234			break;
1235		}
1236	}
1237
1238	if (!found) {
1239		/*
1240		 *	Create a new entry if allowable
1241		 */
1242
1243		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1244		    (c = ip6mr_cache_alloc_unres()) == NULL) {
1245			spin_unlock_bh(&mfc_unres_lock);
1246
1247			kfree_skb(skb);
1248			return -ENOBUFS;
1249		}
1250
1251		/*
1252		 *	Fill in the new cache entry
1253		 */
1254		c->mf6c_parent = -1;
1255		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1256		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1257
1258		/*
1259		 *	Reflect first query at pim6sd
1260		 */
1261		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1262		if (err < 0) {
1263			/* If the report failed throw the cache entry
1264			   out - Brad Parker
1265			 */
1266			spin_unlock_bh(&mfc_unres_lock);
1267
1268			ip6mr_cache_free(c);
1269			kfree_skb(skb);
1270			return err;
1271		}
1272
1273		atomic_inc(&mrt->cache_resolve_queue_len);
1274		list_add(&c->list, &mrt->mfc6_unres_queue);
1275		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1276
1277		ipmr_do_expire_process(mrt);
1278	}
1279
1280	/*
1281	 *	See if we can append the packet
1282	 */
1283	if (c->mfc_un.unres.unresolved.qlen > 3) {
1284		kfree_skb(skb);
1285		err = -ENOBUFS;
1286	} else {
1287		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1288		err = 0;
1289	}
1290
1291	spin_unlock_bh(&mfc_unres_lock);
1292	return err;
1293}
1294
1295/*
1296 *	MFC6 cache manipulation by user space
1297 */
1298
1299static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1300			    int parent)
1301{
1302	int line;
1303	struct mfc6_cache *c, *next;
1304
1305	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1306
1307	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1308		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1309		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1310				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1311		    (parent == -1 || parent == c->mf6c_parent)) {
1312			write_lock_bh(&mrt_lock);
1313			list_del(&c->list);
1314			write_unlock_bh(&mrt_lock);
1315
1316			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1317			ip6mr_cache_free(c);
1318			return 0;
1319		}
1320	}
1321	return -ENOENT;
1322}
1323
1324static int ip6mr_device_event(struct notifier_block *this,
1325			      unsigned long event, void *ptr)
1326{
1327	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1328	struct net *net = dev_net(dev);
1329	struct mr6_table *mrt;
1330	struct mif_device *v;
1331	int ct;
1332	LIST_HEAD(list);
1333
1334	if (event != NETDEV_UNREGISTER)
1335		return NOTIFY_DONE;
1336
1337	ip6mr_for_each_table(mrt, net) {
1338		v = &mrt->vif6_table[0];
1339		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1340			if (v->dev == dev)
1341				mif6_delete(mrt, ct, &list);
1342		}
1343	}
1344	unregister_netdevice_many(&list);
1345
1346	return NOTIFY_DONE;
1347}
1348
1349static struct notifier_block ip6_mr_notifier = {
1350	.notifier_call = ip6mr_device_event
1351};
1352
 1353/*
 1354 *	Setup for IPv6 multicast routing
 1355 */
 1356
1357static int __net_init ip6mr_net_init(struct net *net)
1358{
1359	int err;
1360
1361	err = ip6mr_rules_init(net);
1362	if (err < 0)
1363		goto fail;
1364
1365#ifdef CONFIG_PROC_FS
1366	err = -ENOMEM;
1367	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1368		goto proc_vif_fail;
1369	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1370		goto proc_cache_fail;
1371#endif
1372
1373	return 0;
1374
1375#ifdef CONFIG_PROC_FS
1376proc_cache_fail:
1377	remove_proc_entry("ip6_mr_vif", net->proc_net);
1378proc_vif_fail:
 1379	ip6mr_rules_exit(net);
 1380#endif
 1381fail:
1382	return err;
1383}
1384
1385static void __net_exit ip6mr_net_exit(struct net *net)
1386{
1387#ifdef CONFIG_PROC_FS
1388	remove_proc_entry("ip6_mr_cache", net->proc_net);
1389	remove_proc_entry("ip6_mr_vif", net->proc_net);
1390#endif
1391	ip6mr_rules_exit(net);
1392}
1393
1394static struct pernet_operations ip6mr_net_ops = {
1395	.init = ip6mr_net_init,
1396	.exit = ip6mr_net_exit,
1397};
1398
1399int __init ip6_mr_init(void)
1400{
1401	int err;
1402
1403	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1404				       sizeof(struct mfc6_cache),
1405				       0, SLAB_HWCACHE_ALIGN,
1406				       NULL);
1407	if (!mrt_cachep)
1408		return -ENOMEM;
1409
1410	err = register_pernet_subsys(&ip6mr_net_ops);
1411	if (err)
1412		goto reg_pernet_fail;
1413
1414	err = register_netdevice_notifier(&ip6_mr_notifier);
1415	if (err)
1416		goto reg_notif_fail;
1417#ifdef CONFIG_IPV6_PIMSM_V2
1418	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1419		pr_err("%s: can't add PIM protocol\n", __func__);
1420		err = -EAGAIN;
1421		goto add_proto_fail;
1422	}
1423#endif
1424	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1425		      ip6mr_rtm_dumproute, NULL);
1426	return 0;
 1427#ifdef CONFIG_IPV6_PIMSM_V2
1428add_proto_fail:
1429	unregister_netdevice_notifier(&ip6_mr_notifier);
1430#endif
1431reg_notif_fail:
1432	unregister_pernet_subsys(&ip6mr_net_ops);
1433reg_pernet_fail:
1434	kmem_cache_destroy(mrt_cachep);
1435	return err;
1436}
1437
1438void ip6_mr_cleanup(void)
1439{
1440	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1441#ifdef CONFIG_IPV6_PIMSM_V2
1442	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1443#endif
1444	unregister_netdevice_notifier(&ip6_mr_notifier);
1445	unregister_pernet_subsys(&ip6mr_net_ops);
1446	kmem_cache_destroy(mrt_cachep);
1447}
1448
1449static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1450			 struct mf6cctl *mfc, int mrtsock, int parent)
1451{
1452	bool found = false;
1453	int line;
1454	struct mfc6_cache *uc, *c;
1455	unsigned char ttls[MAXMIFS];
1456	int i;
1457
1458	if (mfc->mf6cc_parent >= MAXMIFS)
1459		return -ENFILE;
1460
1461	memset(ttls, 255, MAXMIFS);
1462	for (i = 0; i < MAXMIFS; i++) {
1463		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1464			ttls[i] = 1;
1465
1466	}
1467
1468	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1469
1470	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1471		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1472		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1473				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1474		    (parent == -1 || parent == mfc->mf6cc_parent)) {
1475			found = true;
1476			break;
1477		}
1478	}
1479
1480	if (found) {
1481		write_lock_bh(&mrt_lock);
1482		c->mf6c_parent = mfc->mf6cc_parent;
1483		ip6mr_update_thresholds(mrt, c, ttls);
1484		if (!mrtsock)
1485			c->mfc_flags |= MFC_STATIC;
1486		write_unlock_bh(&mrt_lock);
1487		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1488		return 0;
1489	}
1490
1491	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1492	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1493		return -EINVAL;
1494
1495	c = ip6mr_cache_alloc();
1496	if (!c)
1497		return -ENOMEM;
1498
1499	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1500	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1501	c->mf6c_parent = mfc->mf6cc_parent;
1502	ip6mr_update_thresholds(mrt, c, ttls);
1503	if (!mrtsock)
1504		c->mfc_flags |= MFC_STATIC;
1505
1506	write_lock_bh(&mrt_lock);
1507	list_add(&c->list, &mrt->mfc6_cache_array[line]);
1508	write_unlock_bh(&mrt_lock);
1509
1510	/*
1511	 *	Check to see if we resolved a queued list. If so we
1512	 *	need to send on the frames and tidy up.
1513	 */
1514	found = false;
1515	spin_lock_bh(&mfc_unres_lock);
1516	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1517		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1518		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1519			list_del(&uc->list);
1520			atomic_dec(&mrt->cache_resolve_queue_len);
1521			found = true;
1522			break;
1523		}
1524	}
1525	if (list_empty(&mrt->mfc6_unres_queue))
1526		del_timer(&mrt->ipmr_expire_timer);
1527	spin_unlock_bh(&mfc_unres_lock);
1528
1529	if (found) {
1530		ip6mr_cache_resolve(net, mrt, uc, c);
1531		ip6mr_cache_free(uc);
1532	}
1533	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1534	return 0;
1535}
1536
 1537/*
 1538 *	Close the multicast socket, and clear the vif tables etc.
 1539 */
1540
1541static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1542{
1543	int i;
1544	LIST_HEAD(list);
1545	struct mfc6_cache *c, *next;
1546
1547	/*
1548	 *	Shut down all active vif entries
1549	 */
1550	for (i = 0; i < mrt->maxvif; i++) {
1551		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1552			continue;
1553		mif6_delete(mrt, i, &list);
1554	}
1555	unregister_netdevice_many(&list);
1556
1557	/*
1558	 *	Wipe the cache
1559	 */
1560	for (i = 0; i < MFC6_LINES; i++) {
1561		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1562			if (!all && (c->mfc_flags & MFC_STATIC))
1563				continue;
1564			write_lock_bh(&mrt_lock);
1565			list_del(&c->list);
1566			write_unlock_bh(&mrt_lock);
1567
1568			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1569			ip6mr_cache_free(c);
1570		}
1571	}
1572
1573	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1574		spin_lock_bh(&mfc_unres_lock);
1575		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1576			list_del(&c->list);
1577			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1578			ip6mr_destroy_unres(mrt, c);
1579		}
1580		spin_unlock_bh(&mfc_unres_lock);
1581	}
1582}
1583
1584static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1585{
1586	int err = 0;
1587	struct net *net = sock_net(sk);
1588
1589	rtnl_lock();
1590	write_lock_bh(&mrt_lock);
1591	if (likely(mrt->mroute6_sk == NULL)) {
1592		mrt->mroute6_sk = sk;
1593		net->ipv6.devconf_all->mc_forwarding++;
1594		inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1595					     NETCONFA_IFINDEX_ALL,
1596					     net->ipv6.devconf_all);
1597	}
1598	else
1599		err = -EADDRINUSE;
1600	write_unlock_bh(&mrt_lock);
 1601
1602	rtnl_unlock();
1603
1604	return err;
1605}
1606
1607int ip6mr_sk_done(struct sock *sk)
1608{
1609	int err = -EACCES;
1610	struct net *net = sock_net(sk);
1611	struct mr6_table *mrt;
1612
1613	rtnl_lock();
1614	ip6mr_for_each_table(mrt, net) {
1615		if (sk == mrt->mroute6_sk) {
1616			write_lock_bh(&mrt_lock);
1617			mrt->mroute6_sk = NULL;
1618			net->ipv6.devconf_all->mc_forwarding--;
1619			inet6_netconf_notify_devconf(net,
1620						     NETCONFA_MC_FORWARDING,
1621						     NETCONFA_IFINDEX_ALL,
1622						     net->ipv6.devconf_all);
1623			write_unlock_bh(&mrt_lock);
1624
1625			mroute_clean_tables(mrt, false);
1626			err = 0;
1627			break;
1628		}
1629	}
1630	rtnl_unlock();
1631
1632	return err;
1633}
1634
1635struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1636{
1637	struct mr6_table *mrt;
1638	struct flowi6 fl6 = {
1639		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
1640		.flowi6_oif	= skb->dev->ifindex,
1641		.flowi6_mark	= skb->mark,
1642	};
1643
1644	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1645		return NULL;
1646
1647	return mrt->mroute6_sk;
1648}
1649
1650/*
1651 *	Socket options and virtual interface manipulation. The whole
1652 *	virtual interface system is a complete heap, but unfortunately
1653 *	that's how BSD mrouted happens to think. Maybe one day with a proper
1654 *	MOSPF/PIM router set up we can clean this up.
1655 */
1656
1657int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1658{
1659	int ret, parent = 0;
1660	struct mif6ctl vif;
1661	struct mf6cctl mfc;
1662	mifi_t mifi;
1663	struct net *net = sock_net(sk);
1664	struct mr6_table *mrt;
1665
1666	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1667	if (!mrt)
1668		return -ENOENT;
1669
1670	if (optname != MRT6_INIT) {
1671		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1672			return -EACCES;
1673	}
1674
1675	switch (optname) {
1676	case MRT6_INIT:
1677		if (sk->sk_type != SOCK_RAW ||
1678		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1679			return -EOPNOTSUPP;
1680		if (optlen < sizeof(int))
1681			return -EINVAL;
1682
1683		return ip6mr_sk_init(mrt, sk);
1684
1685	case MRT6_DONE:
1686		return ip6mr_sk_done(sk);
1687
1688	case MRT6_ADD_MIF:
1689		if (optlen < sizeof(vif))
1690			return -EINVAL;
1691		if (copy_from_user(&vif, optval, sizeof(vif)))
1692			return -EFAULT;
1693		if (vif.mif6c_mifi >= MAXMIFS)
1694			return -ENFILE;
1695		rtnl_lock();
1696		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1697		rtnl_unlock();
1698		return ret;
1699
1700	case MRT6_DEL_MIF:
1701		if (optlen < sizeof(mifi_t))
1702			return -EINVAL;
1703		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1704			return -EFAULT;
1705		rtnl_lock();
1706		ret = mif6_delete(mrt, mifi, NULL);
1707		rtnl_unlock();
1708		return ret;
1709
1710	/*
1711	 *	Manipulate the forwarding caches. These live
1712	 *	in a sort of kernel/user symbiosis.
1713	 */
1714	case MRT6_ADD_MFC:
1715	case MRT6_DEL_MFC:
 1716		parent = -1;	/* and fall through to the proxy cases */
1717	case MRT6_ADD_MFC_PROXY:
1718	case MRT6_DEL_MFC_PROXY:
1719		if (optlen < sizeof(mfc))
1720			return -EINVAL;
1721		if (copy_from_user(&mfc, optval, sizeof(mfc)))
1722			return -EFAULT;
1723		if (parent == 0)
1724			parent = mfc.mf6cc_parent;
1725		rtnl_lock();
1726		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1727			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1728		else
1729			ret = ip6mr_mfc_add(net, mrt, &mfc,
1730					    sk == mrt->mroute6_sk, parent);
1731		rtnl_unlock();
1732		return ret;
1733
 1734	/*
 1735	 *	Control PIM assert (activating PIM also activates assert)
 1736	 */
1737	case MRT6_ASSERT:
1738	{
1739		int v;
1740
1741		if (optlen != sizeof(v))
1742			return -EINVAL;
1743		if (get_user(v, (int __user *)optval))
1744			return -EFAULT;
1745		mrt->mroute_do_assert = v;
1746		return 0;
1747	}
1748
1749#ifdef CONFIG_IPV6_PIMSM_V2
1750	case MRT6_PIM:
1751	{
1752		int v;
1753
1754		if (optlen != sizeof(v))
1755			return -EINVAL;
1756		if (get_user(v, (int __user *)optval))
1757			return -EFAULT;
1758		v = !!v;
1759		rtnl_lock();
1760		ret = 0;
1761		if (v != mrt->mroute_do_pim) {
1762			mrt->mroute_do_pim = v;
1763			mrt->mroute_do_assert = v;
1764		}
1765		rtnl_unlock();
1766		return ret;
1767	}
1768
1769#endif
1770#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1771	case MRT6_TABLE:
1772	{
1773		u32 v;
1774
1775		if (optlen != sizeof(u32))
1776			return -EINVAL;
1777		if (get_user(v, (u32 __user *)optval))
1778			return -EFAULT;
1779		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1780		if (v != RT_TABLE_DEFAULT && v >= 100000000)
1781			return -EINVAL;
1782		if (sk == mrt->mroute6_sk)
1783			return -EBUSY;
1784
1785		rtnl_lock();
1786		ret = 0;
1787		if (!ip6mr_new_table(net, v))
1788			ret = -ENOMEM;
1789		raw6_sk(sk)->ip6mr_table = v;
1790		rtnl_unlock();
1791		return ret;
1792	}
1793#endif
1794	/*
1795	 *	Spurious command, or MRT6_VERSION which you cannot
1796	 *	set.
1797	 */
1798	default:
1799		return -ENOPROTOOPT;
1800	}
1801}
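/*
 * A minimal userspace sketch (not part of this file) of the control
 * sequence the function above accepts: MRT6_INIT on a raw ICMPv6
 * socket, then one MRT6_ADD_MIF per interface. The interface index 2
 * is a placeholder; error handling is minimal.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

static int become_ip6_mrouter(void)
{
	int one = 1;
	struct mif6ctl mc;
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (s < 0 ||
	    setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0)
		return -1;

	memset(&mc, 0, sizeof(mc));
	mc.mif6c_mifi = 0;		/* mif #0 ... */
	mc.mif6c_pifi = 2;		/* ... bound to ifindex 2 (placeholder) */
	if (setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc)) < 0)
		return -1;
	return s;			/* kernel reports arrive on this socket */
}
#endif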
1802
 1803/*
 1804 *	Getsockopt support for the multicast routing system.
 1805 */
1806
1807int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1808			  int __user *optlen)
1809{
1810	int olr;
1811	int val;
1812	struct net *net = sock_net(sk);
1813	struct mr6_table *mrt;
1814
1815	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1816	if (!mrt)
1817		return -ENOENT;
1818
1819	switch (optname) {
1820	case MRT6_VERSION:
1821		val = 0x0305;
1822		break;
1823#ifdef CONFIG_IPV6_PIMSM_V2
1824	case MRT6_PIM:
1825		val = mrt->mroute_do_pim;
1826		break;
1827#endif
1828	case MRT6_ASSERT:
1829		val = mrt->mroute_do_assert;
1830		break;
1831	default:
1832		return -ENOPROTOOPT;
1833	}
1834
1835	if (get_user(olr, optlen))
1836		return -EFAULT;
1837
1838	olr = min_t(int, olr, sizeof(int));
1839	if (olr < 0)
1840		return -EINVAL;
1841
1842	if (put_user(olr, optlen))
1843		return -EFAULT;
1844	if (copy_to_user(optval, &val, olr))
1845		return -EFAULT;
1846	return 0;
1847}
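/*
 * Example query (not part of this file): through the path above,
 *
 *	int ver; socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *
 * returns ver == 0x0305 on this kernel.
 */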
1848
 1849/*
 1850 *	The IPv6 multicast ioctl support routines.
 1851 */
1852
1853int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1854{
1855	struct sioc_sg_req6 sr;
1856	struct sioc_mif_req6 vr;
1857	struct mif_device *vif;
1858	struct mfc6_cache *c;
1859	struct net *net = sock_net(sk);
1860	struct mr6_table *mrt;
1861
1862	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1863	if (!mrt)
1864		return -ENOENT;
1865
1866	switch (cmd) {
1867	case SIOCGETMIFCNT_IN6:
1868		if (copy_from_user(&vr, arg, sizeof(vr)))
1869			return -EFAULT;
1870		if (vr.mifi >= mrt->maxvif)
1871			return -EINVAL;
1872		read_lock(&mrt_lock);
1873		vif = &mrt->vif6_table[vr.mifi];
1874		if (MIF_EXISTS(mrt, vr.mifi)) {
1875			vr.icount = vif->pkt_in;
1876			vr.ocount = vif->pkt_out;
1877			vr.ibytes = vif->bytes_in;
1878			vr.obytes = vif->bytes_out;
1879			read_unlock(&mrt_lock);
1880
1881			if (copy_to_user(arg, &vr, sizeof(vr)))
1882				return -EFAULT;
1883			return 0;
1884		}
1885		read_unlock(&mrt_lock);
1886		return -EADDRNOTAVAIL;
1887	case SIOCGETSGCNT_IN6:
1888		if (copy_from_user(&sr, arg, sizeof(sr)))
1889			return -EFAULT;
1890
1891		read_lock(&mrt_lock);
1892		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1893		if (c) {
1894			sr.pktcnt = c->mfc_un.res.pkt;
1895			sr.bytecnt = c->mfc_un.res.bytes;
1896			sr.wrong_if = c->mfc_un.res.wrong_if;
1897			read_unlock(&mrt_lock);
1898
1899			if (copy_to_user(arg, &sr, sizeof(sr)))
1900				return -EFAULT;
1901			return 0;
1902		}
1903		read_unlock(&mrt_lock);
1904		return -EADDRNOTAVAIL;
1905	default:
1906		return -ENOIOCTLCMD;
1907	}
1908}
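/*
 * A userspace sketch (not part of this file) of the SIOCGETSGCNT_IN6
 * path above: fill in src/grp and the kernel returns the per-(S,G)
 * counters. The addresses are placeholders; header clashes between
 * kernel and libc headers are glossed over.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

static void dump_sg_counters(int mroute_sock)
{
	struct sioc_sg_req6 sr;

	memset(&sr, 0, sizeof(sr));
	sr.src.sin6_family = AF_INET6;
	sr.grp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
	inet_pton(AF_INET6, "ff0e::1", &sr.grp.sin6_addr);
	if (ioctl(mroute_sock, SIOCGETSGCNT_IN6, &sr) == 0)
		printf("pkts %lu bytes %lu wrong_if %lu\n",
		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
}
#endif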
1909
1910#ifdef CONFIG_COMPAT
1911struct compat_sioc_sg_req6 {
1912	struct sockaddr_in6 src;
1913	struct sockaddr_in6 grp;
1914	compat_ulong_t pktcnt;
1915	compat_ulong_t bytecnt;
1916	compat_ulong_t wrong_if;
1917};
1918
1919struct compat_sioc_mif_req6 {
1920	mifi_t	mifi;
1921	compat_ulong_t icount;
1922	compat_ulong_t ocount;
1923	compat_ulong_t ibytes;
1924	compat_ulong_t obytes;
1925};
1926
1927int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1928{
1929	struct compat_sioc_sg_req6 sr;
1930	struct compat_sioc_mif_req6 vr;
1931	struct mif_device *vif;
1932	struct mfc6_cache *c;
1933	struct net *net = sock_net(sk);
1934	struct mr6_table *mrt;
1935
1936	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1937	if (!mrt)
1938		return -ENOENT;
1939
1940	switch (cmd) {
1941	case SIOCGETMIFCNT_IN6:
1942		if (copy_from_user(&vr, arg, sizeof(vr)))
1943			return -EFAULT;
1944		if (vr.mifi >= mrt->maxvif)
1945			return -EINVAL;
1946		read_lock(&mrt_lock);
1947		vif = &mrt->vif6_table[vr.mifi];
1948		if (MIF_EXISTS(mrt, vr.mifi)) {
1949			vr.icount = vif->pkt_in;
1950			vr.ocount = vif->pkt_out;
1951			vr.ibytes = vif->bytes_in;
1952			vr.obytes = vif->bytes_out;
1953			read_unlock(&mrt_lock);
1954
1955			if (copy_to_user(arg, &vr, sizeof(vr)))
1956				return -EFAULT;
1957			return 0;
1958		}
1959		read_unlock(&mrt_lock);
1960		return -EADDRNOTAVAIL;
1961	case SIOCGETSGCNT_IN6:
1962		if (copy_from_user(&sr, arg, sizeof(sr)))
1963			return -EFAULT;
1964
1965		read_lock(&mrt_lock);
1966		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1967		if (c) {
1968			sr.pktcnt = c->mfc_un.res.pkt;
1969			sr.bytecnt = c->mfc_un.res.bytes;
1970			sr.wrong_if = c->mfc_un.res.wrong_if;
1971			read_unlock(&mrt_lock);
1972
1973			if (copy_to_user(arg, &sr, sizeof(sr)))
1974				return -EFAULT;
1975			return 0;
1976		}
1977		read_unlock(&mrt_lock);
1978		return -EADDRNOTAVAIL;
1979	default:
1980		return -ENOIOCTLCMD;
1981	}
1982}
1983#endif
1984
1985static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1986{
1987	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
1988			 IPSTATS_MIB_OUTFORWDATAGRAMS);
1989	IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
1990			 IPSTATS_MIB_OUTOCTETS, skb->len);
1991	return dst_output(net, sk, skb);
1992}
1993
1994/*
1995 *	Processing handlers for ip6mr_forward
1996 */
1997
1998static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1999			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
2000{
2001	struct ipv6hdr *ipv6h;
2002	struct mif_device *vif = &mrt->vif6_table[vifi];
2003	struct net_device *dev;
2004	struct dst_entry *dst;
2005	struct flowi6 fl6;
2006
2007	if (!vif->dev)
2008		goto out_free;
2009
2010#ifdef CONFIG_IPV6_PIMSM_V2
2011	if (vif->flags & MIFF_REGISTER) {
2012		vif->pkt_out++;
2013		vif->bytes_out += skb->len;
2014		vif->dev->stats.tx_bytes += skb->len;
2015		vif->dev->stats.tx_packets++;
2016		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2017		goto out_free;
2018	}
2019#endif
2020
2021	ipv6h = ipv6_hdr(skb);
2022
2023	fl6 = (struct flowi6) {
2024		.flowi6_oif = vif->link,
2025		.daddr = ipv6h->daddr,
2026	};
2027
2028	dst = ip6_route_output(net, NULL, &fl6);
2029	if (dst->error) {
2030		dst_release(dst);
2031		goto out_free;
2032	}
2033
2034	skb_dst_drop(skb);
2035	skb_dst_set(skb, dst);
2036
 2037	/*
 2038	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
 2039	 * not only before forwarding, but also after forwarding on all output
 2040	 * interfaces. Clearly, if the mrouter runs a multicast
 2041	 * program, that program should receive packets regardless of the
 2042	 * interface it joined on.
 2043	 * If we do not do this, the program would have to join on all
 2044	 * interfaces. On the other hand, a multihomed host (or router, but
 2045	 * not an mrouter) cannot join on more than one interface - that would
 2046	 * result in receiving multiple copies of each packet.
 2047	 */
2048	dev = vif->dev;
2049	skb->dev = dev;
2050	vif->pkt_out++;
2051	vif->bytes_out += skb->len;
2052
2053	/* We are about to write */
2054	/* XXX: extension headers? */
2055	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2056		goto out_free;
2057
2058	ipv6h = ipv6_hdr(skb);
2059	ipv6h->hop_limit--;
2060
2061	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2062
2063	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2064		       net, NULL, skb, skb->dev, dev,
2065		       ip6mr_forward2_finish);
2066
2067out_free:
2068	kfree_skb(skb);
2069	return 0;
2070}
2071
2072static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2073{
2074	int ct;
2075
2076	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2077		if (mrt->vif6_table[ct].dev == dev)
2078			break;
2079	}
2080	return ct;
2081}
2082
2083static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2084			   struct sk_buff *skb, struct mfc6_cache *cache)
2085{
2086	int psend = -1;
2087	int vif, ct;
2088	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2089
2090	vif = cache->mf6c_parent;
2091	cache->mfc_un.res.pkt++;
2092	cache->mfc_un.res.bytes += skb->len;
2093
2094	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2095		struct mfc6_cache *cache_proxy;
2096
2097		/* For an (*,G) entry, we only check that the incoming
2098		 * interface is part of the static tree.
2099		 */
2100		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2101		if (cache_proxy &&
2102		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2103			goto forward;
2104	}
2105
2106	/*
2107	 * Wrong interface: drop packet and (maybe) send PIM assert.
2108	 */
2109	if (mrt->vif6_table[vif].dev != skb->dev) {
2110		cache->mfc_un.res.wrong_if++;
2111
2112		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2113		    /* PIM-SM uses asserts when switching from the RPT to the
2114		       SPT, so we cannot check that the packet arrived on an
2115		       oif. That is bad, but otherwise we would need to move a
2116		       pretty large chunk of pimd into the kernel. Ough... --ANK
2117		     */
2118		    (mrt->mroute_do_pim ||
2119		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2120		    time_after(jiffies,
2121			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2122			cache->mfc_un.res.last_assert = jiffies;
2123			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2124		}
2125		goto dont_forward;
2126	}
2127
2128forward:
2129	mrt->vif6_table[vif].pkt_in++;
2130	mrt->vif6_table[vif].bytes_in += skb->len;
2131
2132	/*
2133	 *	Forward the frame
2134	 */
2135	if (ipv6_addr_any(&cache->mf6c_origin) &&
2136	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2137		if (true_vifi >= 0 &&
2138		    true_vifi != cache->mf6c_parent &&
2139		    ipv6_hdr(skb)->hop_limit >
2140				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2141			/* It's an (*,*) entry and the packet is not coming from
2142			 * the upstream: forward the packet to the upstream
2143			 * only.
2144			 */
2145			psend = cache->mf6c_parent;
2146			goto last_forward;
2147		}
2148		goto dont_forward;
2149	}
2150	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2151		/* For (*,G) entry, don't forward to the incoming interface */
2152		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2153		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2154			if (psend != -1) {
2155				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2156				if (skb2)
2157					ip6mr_forward2(net, mrt, skb2, cache, psend);
2158			}
2159			psend = ct;
2160		}
2161	}
2162last_forward:
2163	if (psend != -1) {
2164		ip6mr_forward2(net, mrt, skb, cache, psend);
2165		return;
2166	}
2167
2168dont_forward:
2169	kfree_skb(skb);
2170}
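/*
 * The oif loop above boils down to one predicate per interface: forward
 * only when the packet's remaining hop limit exceeds the MIF's configured
 * threshold, with 255 meaning "never".  A standalone illustration with
 * made-up threshold values:
 */
#include <stdio.h>

static int should_forward(unsigned char hop_limit, unsigned char threshold)
{
	/* mirrors: ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct] */
	return hop_limit > threshold;
}

int main(void)
{
	unsigned char ttls[3] = { 1, 64, 255 };		/* demo thresholds */
	int ct;

	for (ct = 0; ct < 3; ct++)
		printf("oif %d: %s\n", ct,
		       should_forward(64, ttls[ct]) ? "forward" : "skip");
	return 0;
}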
2171
2172
2173/*
2174 *	Multicast packets for forwarding arrive here
2175 */
2176
2177int ip6_mr_input(struct sk_buff *skb)
2178{
2179	struct mfc6_cache *cache;
2180	struct net *net = dev_net(skb->dev);
2181	struct mr6_table *mrt;
2182	struct flowi6 fl6 = {
2183		.flowi6_iif	= skb->dev->ifindex,
2184		.flowi6_mark	= skb->mark,
2185	};
2186	int err;
2187
2188	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2189	if (err < 0) {
2190		kfree_skb(skb);
2191		return err;
2192	}
2193
2194	read_lock(&mrt_lock);
2195	cache = ip6mr_cache_find(mrt,
2196				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2197	if (!cache) {
2198		int vif = ip6mr_find_vif(mrt, skb->dev);
2199
2200		if (vif >= 0)
2201			cache = ip6mr_cache_find_any(mrt,
2202						     &ipv6_hdr(skb)->daddr,
2203						     vif);
2204	}
2205
2206	/*
2207	 *	No usable cache entry
2208	 */
2209	if (!cache) {
2210		int vif;
2211
2212		vif = ip6mr_find_vif(mrt, skb->dev);
2213		if (vif >= 0) {
2214			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2215			read_unlock(&mrt_lock);
2216
2217			return err;
2218		}
2219		read_unlock(&mrt_lock);
2220		kfree_skb(skb);
2221		return -ENODEV;
2222	}
2223
2224	ip6_mr_forward(net, mrt, skb, cache);
2225
2226	read_unlock(&mrt_lock);
2227
2228	return 0;
2229}
2230
2231
2232static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2233			       struct mfc6_cache *c, struct rtmsg *rtm)
2234{
2235	int ct;
2236	struct rtnexthop *nhp;
2237	struct nlattr *mp_attr;
2238	struct rta_mfc_stats mfcs;
2239
2240	/* If cache is unresolved, don't try to parse IIF and OIF */
2241	if (c->mf6c_parent >= MAXMIFS)
2242		return -ENOENT;
2243
2244	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2245	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2246		return -EMSGSIZE;
2247	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2248	if (!mp_attr)
2249		return -EMSGSIZE;
2250
2251	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2252		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2253			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2254			if (!nhp) {
2255				nla_nest_cancel(skb, mp_attr);
2256				return -EMSGSIZE;
2257			}
2258
2259			nhp->rtnh_flags = 0;
2260			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2261			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2262			nhp->rtnh_len = sizeof(*nhp);
2263		}
2264	}
2265
2266	nla_nest_end(skb, mp_attr);
2267
2268	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2269	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2270	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2271	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2272		return -EMSGSIZE;
2273
2274	rtm->rtm_type = RTN_MULTICAST;
2275	return 1;
2276}
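/*
 * The RTA_MULTIPATH attribute built above is a packed array of struct
 * rtnexthop entries, one per forwarding oif, with the TTL threshold
 * carried in rtnh_hops.  A userspace consumer would walk it with the
 * RTNH_* macros; a sketch, assuming mp/len describe the attribute
 * payload of a received route message:
 */
#include <stdio.h>
#include <linux/rtnetlink.h>

static void walk_multipath(void *mp, int len)
{
	struct rtnexthop *rtnh = mp;

	while (RTNH_OK(rtnh, len)) {
		printf("oif ifindex %d, ttl threshold %u\n",
		       rtnh->rtnh_ifindex, rtnh->rtnh_hops);
		len -= RTNH_ALIGN(rtnh->rtnh_len);
		rtnh = RTNH_NEXT(rtnh);
	}
}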
2277
2278int ip6mr_get_route(struct net *net,
2279		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
2280{
2281	int err;
2282	struct mr6_table *mrt;
2283	struct mfc6_cache *cache;
2284	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2285
2286	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2287	if (!mrt)
2288		return -ENOENT;
2289
2290	read_lock(&mrt_lock);
2291	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2292	if (!cache && skb->dev) {
2293		int vif = ip6mr_find_vif(mrt, skb->dev);
2294
2295		if (vif >= 0)
2296			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2297						     vif);
2298	}
2299
2300	if (!cache) {
2301		struct sk_buff *skb2;
2302		struct ipv6hdr *iph;
2303		struct net_device *dev;
2304		int vif;
2305
2306		if (nowait) {
2307			read_unlock(&mrt_lock);
2308			return -EAGAIN;
2309		}
2310
2311		dev = skb->dev;
2312		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2313			read_unlock(&mrt_lock);
2314			return -ENODEV;
2315		}
2316
2317		/* really correct? */
2318		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2319		if (!skb2) {
2320			read_unlock(&mrt_lock);
2321			return -ENOMEM;
2322		}
2323
2324		skb_reset_transport_header(skb2);
2325
2326		skb_put(skb2, sizeof(struct ipv6hdr));
2327		skb_reset_network_header(skb2);
2328
2329		iph = ipv6_hdr(skb2);
2330		iph->version = 0;
2331		iph->priority = 0;
2332		iph->flow_lbl[0] = 0;
2333		iph->flow_lbl[1] = 0;
2334		iph->flow_lbl[2] = 0;
2335		iph->payload_len = 0;
2336		iph->nexthdr = IPPROTO_NONE;
2337		iph->hop_limit = 0;
2338		iph->saddr = rt->rt6i_src.addr;
2339		iph->daddr = rt->rt6i_dst.addr;
2340
2341		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2342		read_unlock(&mrt_lock);
2343
2344		return err;
2345	}
2346
2347	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2348		cache->mfc_flags |= MFC_NOTIFY;
2349
2350	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2351	read_unlock(&mrt_lock);
2352	return err;
2353}
2354
2355static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2356			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2357			     int flags)
2358{
2359	struct nlmsghdr *nlh;
2360	struct rtmsg *rtm;
2361	int err;
2362
2363	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2364	if (!nlh)
2365		return -EMSGSIZE;
2366
2367	rtm = nlmsg_data(nlh);
2368	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2369	rtm->rtm_dst_len  = 128;
2370	rtm->rtm_src_len  = 128;
2371	rtm->rtm_tos      = 0;
2372	rtm->rtm_table    = mrt->id;
2373	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2374		goto nla_put_failure;
2375	rtm->rtm_type = RTN_MULTICAST;
2376	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2377	if (c->mfc_flags & MFC_STATIC)
2378		rtm->rtm_protocol = RTPROT_STATIC;
2379	else
2380		rtm->rtm_protocol = RTPROT_MROUTED;
2381	rtm->rtm_flags    = 0;
2382
2383	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2384	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2385		goto nla_put_failure;
2386	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2387	/* do not break the dump if cache is unresolved */
2388	if (err < 0 && err != -ENOENT)
2389		goto nla_put_failure;
2390
2391	nlmsg_end(skb, nlh);
2392	return 0;
2393
2394nla_put_failure:
2395	nlmsg_cancel(skb, nlh);
2396	return -EMSGSIZE;
2397}
2398
2399static int mr6_msgsize(bool unresolved, int maxvif)
2400{
2401	size_t len =
2402		NLMSG_ALIGN(sizeof(struct rtmsg))
2403		+ nla_total_size(4)	/* RTA_TABLE */
2404		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2405		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2406		;
2407
2408	if (!unresolved)
2409		len = len
2410		      + nla_total_size(4)	/* RTA_IIF */
2411		      + nla_total_size(0)	/* RTA_MULTIPATH */
2412		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2413						/* RTA_MFC_STATS */
2414		      + nla_total_size(sizeof(struct rta_mfc_stats))
2415		;
2416
2417	return len;
2418}
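/*
 * mr6_msgsize() is plain attribute arithmetic: nla_total_size(n) is just
 * NLA_ALIGN(NLA_HDRLEN + n).  The same worst-case computation can be
 * replayed in userspace with the uapi macros (a sketch; MAXMIFS comes
 * from <linux/mroute6.h>):
 */
#include <stdio.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/mroute6.h>

static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	size_t len = NLMSG_ALIGN(sizeof(struct rtmsg))
		     + nla_total_size(4)			/* RTA_TABLE */
		     + nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		     + nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		     + nla_total_size(4)			/* RTA_IIF */
		     + nla_total_size(0)			/* RTA_MULTIPATH */
		     + MAXMIFS * NLA_ALIGN(sizeof(struct rtnexthop))
		     + nla_total_size(sizeof(struct rta_mfc_stats));

	printf("resolved-entry notification needs %zu bytes\n", len);
	return 0;
}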
2419
2420static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2421			      int cmd)
2422{
2423	struct net *net = read_pnet(&mrt->net);
2424	struct sk_buff *skb;
2425	int err = -ENOBUFS;
2426
2427	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2428			GFP_ATOMIC);
2429	if (!skb)
2430		goto errout;
2431
2432	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2433	if (err < 0)
2434		goto errout;
2435
2436	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2437	return;
2438
2439errout:
2440	kfree_skb(skb);
2441	if (err < 0)
2442		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2443}
2444
2445static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2446{
2447	struct net *net = sock_net(skb->sk);
2448	struct mr6_table *mrt;
2449	struct mfc6_cache *mfc;
2450	unsigned int t = 0, s_t;
2451	unsigned int h = 0, s_h;
2452	unsigned int e = 0, s_e;
2453
2454	s_t = cb->args[0];
2455	s_h = cb->args[1];
2456	s_e = cb->args[2];
2457
2458	read_lock(&mrt_lock);
2459	ip6mr_for_each_table(mrt, net) {
2460		if (t < s_t)
2461			goto next_table;
2462		if (t > s_t)
2463			s_h = 0;
2464		for (h = s_h; h < MFC6_LINES; h++) {
2465			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2466				if (e < s_e)
2467					goto next_entry;
2468				if (ip6mr_fill_mroute(mrt, skb,
2469						      NETLINK_CB(cb->skb).portid,
2470						      cb->nlh->nlmsg_seq,
2471						      mfc, RTM_NEWROUTE,
2472						      NLM_F_MULTI) < 0)
2473					goto done;
2474next_entry:
2475				e++;
2476			}
2477			e = s_e = 0;
2478		}
2479		spin_lock_bh(&mfc_unres_lock);
2480		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2481			if (e < s_e)
2482				goto next_entry2;
2483			if (ip6mr_fill_mroute(mrt, skb,
2484					      NETLINK_CB(cb->skb).portid,
2485					      cb->nlh->nlmsg_seq,
2486					      mfc, RTM_NEWROUTE,
2487					      NLM_F_MULTI) < 0) {
2488				spin_unlock_bh(&mfc_unres_lock);
2489				goto done;
2490			}
2491next_entry2:
2492			e++;
2493		}
2494		spin_unlock_bh(&mfc_unres_lock);
2495		e = s_e = 0;
2496		s_h = 0;
2497next_table:
2498		t++;
2499	}
2500done:
2501	read_unlock(&mrt_lock);
2502
2503	cb->args[2] = e;
2504	cb->args[1] = h;
2505	cb->args[0] = t;
2506
2507	return skb->len;
2508}
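/*
 * What drives the dump above from userspace: an RTM_GETROUTE dump request
 * with rtm_family = RTNL_FAMILY_IP6MR.  The kernel replies with a series
 * of NLM_F_MULTI RTM_NEWROUTE messages, resuming from cb->args[] between
 * recv()s.  A minimal request sketch:
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int request_ip6mr_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = RTNL_FAMILY_IP6MR;

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		return -1;
	return fd;	/* caller recv()s the multipart reply */
}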
/* ===== The same source file, as of kernel v6.13.7 ===== */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux IPv6 multicast routing support for BSD pim6sd
   4 *	Based on net/ipv4/ipmr.c.
   5 *
   6 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
   7 *		LSIIT Laboratory, Strasbourg, France
   8 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
   9 *		6WIND, Paris, France
  10 *	Copyright (C)2007,2008 USAGI/WIDE Project
  11 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  12 */
  13
  14#include <linux/uaccess.h>
  15#include <linux/types.h>
  16#include <linux/sched.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/kernel.h>
  20#include <linux/fcntl.h>
  21#include <linux/stat.h>
  22#include <linux/socket.h>
  23#include <linux/inet.h>
  24#include <linux/netdevice.h>
  25#include <linux/inetdevice.h>
  26#include <linux/proc_fs.h>
  27#include <linux/seq_file.h>
  28#include <linux/init.h>
  29#include <linux/compat.h>
  30#include <linux/rhashtable.h>
  31#include <net/protocol.h>
  32#include <linux/skbuff.h>
  33#include <net/raw.h>
  34#include <linux/notifier.h>
  35#include <linux/if_arp.h>
  36#include <net/checksum.h>
  37#include <net/netlink.h>
  38#include <net/fib_rules.h>
  39
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <linux/mroute6.h>
  43#include <linux/pim.h>
  44#include <net/addrconf.h>
  45#include <linux/netfilter_ipv6.h>
  46#include <linux/export.h>
  47#include <net/ip6_checksum.h>
  48#include <linux/netconf.h>
  49#include <net/ip_tunnels.h>
  50
  51#include <linux/nospec.h>
  52
  53struct ip6mr_rule {
  54	struct fib_rule		common;
  55};
  56
  57struct ip6mr_result {
  58	struct mr_table	*mrt;
  59};
  60
  61/* Big lock, protecting vif table, mrt cache and mroute socket state.
  62   Note that the changes are semaphored via rtnl_lock.
  63 */
  64
  65static DEFINE_SPINLOCK(mrt_lock);
  66
  67static struct net_device *vif_dev_read(const struct vif_device *vif)
  68{
  69	return rcu_dereference(vif->dev);
  70}
  71
  72/* Multicast router control variables */
  73
  74/* Special spinlock for queue of unresolved entries */
  75static DEFINE_SPINLOCK(mfc_unres_lock);
  76
  77/* We return to original Alan's scheme. Hash table of resolved
  78   entries is changed only in process context and protected
  79   with weak lock mrt_lock. Queue of unresolved entries is protected
  80   with strong spinlock mfc_unres_lock.
  81
  82   In this case data path is free of exclusive locks at all.
  83 */
  84
  85static struct kmem_cache *mrt_cachep __read_mostly;
  86
  87static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
  88static void ip6mr_free_table(struct mr_table *mrt);
  89
  90static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
  91			   struct net_device *dev, struct sk_buff *skb,
  92			   struct mfc6_cache *cache);
  93static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
  94			      mifi_t mifi, int assert);
  95static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
  96			      int cmd);
  97static void mrt6msg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
  98static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
  99			      struct netlink_ext_ack *extack);
 100static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 101			       struct netlink_callback *cb);
 102static void mroute_clean_tables(struct mr_table *mrt, int flags);
 103static void ipmr_expire_process(struct timer_list *t);
 104
 105#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 106#define ip6mr_for_each_table(mrt, net) \
 107	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
 108				lockdep_rtnl_is_held() || \
 109				list_empty(&net->ipv6.mr6_tables))
 110
 111static bool ip6mr_can_free_table(struct net *net)
 112{
 113	return !check_net(net) || !net_initialized(net);
 114}
 115
 116static struct mr_table *ip6mr_mr_table_iter(struct net *net,
 117					    struct mr_table *mrt)
 118{
 119	struct mr_table *ret;
 120
 121	if (!mrt)
 122		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
 123				     struct mr_table, list);
 124	else
 125		ret = list_entry_rcu(mrt->list.next,
 126				     struct mr_table, list);
 127
 128	if (&ret->list == &net->ipv6.mr6_tables)
 129		return NULL;
 130	return ret;
 131}
 132
 133static struct mr_table *__ip6mr_get_table(struct net *net, u32 id)
 134{
 135	struct mr_table *mrt;
 136
 137	ip6mr_for_each_table(mrt, net) {
 138		if (mrt->id == id)
 139			return mrt;
 140	}
 141	return NULL;
 142}
 143
 144static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 145{
 146	struct mr_table *mrt;
 147
 148	rcu_read_lock();
 149	mrt = __ip6mr_get_table(net, id);
 150	rcu_read_unlock();
 151	return mrt;
 152}
 153
 154static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 155			    struct mr_table **mrt)
 156{
 157	int err;
 158	struct ip6mr_result res;
 159	struct fib_lookup_arg arg = {
 160		.result = &res,
 161		.flags = FIB_LOOKUP_NOREF,
 162	};
 163
 164	/* update flow if oif or iif point to device enslaved to l3mdev */
 165	l3mdev_update_flow(net, flowi6_to_flowi(flp6));
 166
 167	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 168			       flowi6_to_flowi(flp6), 0, &arg);
 169	if (err < 0)
 170		return err;
 171	*mrt = res.mrt;
 172	return 0;
 173}
 174
 175static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 176			     int flags, struct fib_lookup_arg *arg)
 177{
 178	struct ip6mr_result *res = arg->result;
 179	struct mr_table *mrt;
 180
 181	switch (rule->action) {
 182	case FR_ACT_TO_TBL:
 183		break;
 184	case FR_ACT_UNREACHABLE:
 185		return -ENETUNREACH;
 186	case FR_ACT_PROHIBIT:
 187		return -EACCES;
 188	case FR_ACT_BLACKHOLE:
 189	default:
 190		return -EINVAL;
 191	}
 192
 193	arg->table = fib_rule_get_table(rule, arg);
 194
 195	mrt = __ip6mr_get_table(rule->fr_net, arg->table);
 196	if (!mrt)
 197		return -EAGAIN;
 198	res->mrt = mrt;
 199	return 0;
 200}
 201
 202static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
 203{
 204	return 1;
 205}
 206
 207static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 208				struct fib_rule_hdr *frh, struct nlattr **tb,
 209				struct netlink_ext_ack *extack)
 210{
 211	return 0;
 212}
 213
 214static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 215			      struct nlattr **tb)
 216{
 217	return 1;
 218}
 219
 220static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 221			   struct fib_rule_hdr *frh)
 222{
 223	frh->dst_len = 0;
 224	frh->src_len = 0;
 225	frh->tos     = 0;
 226	return 0;
 227}
 228
 229static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
 230	.family		= RTNL_FAMILY_IP6MR,
 231	.rule_size	= sizeof(struct ip6mr_rule),
 232	.addr_size	= sizeof(struct in6_addr),
 233	.action		= ip6mr_rule_action,
 234	.match		= ip6mr_rule_match,
 235	.configure	= ip6mr_rule_configure,
 236	.compare	= ip6mr_rule_compare,
 237	.fill		= ip6mr_rule_fill,
 238	.nlgroup	= RTNLGRP_IPV6_RULE,
 239	.owner		= THIS_MODULE,
 240};
 241
 242static int __net_init ip6mr_rules_init(struct net *net)
 243{
 244	struct fib_rules_ops *ops;
 245	struct mr_table *mrt;
 246	int err;
 247
 248	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
 249	if (IS_ERR(ops))
 250		return PTR_ERR(ops);
 251
 252	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 253
 254	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 255	if (IS_ERR(mrt)) {
 256		err = PTR_ERR(mrt);
 257		goto err1;
 258	}
 259
 260	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT);
 261	if (err < 0)
 262		goto err2;
 263
 264	net->ipv6.mr6_rules_ops = ops;
 265	return 0;
 266
 267err2:
 268	rtnl_lock();
 269	ip6mr_free_table(mrt);
 270	rtnl_unlock();
 271err1:
 272	fib_rules_unregister(ops);
 273	return err;
 274}
 275
 276static void __net_exit ip6mr_rules_exit(struct net *net)
 277{
 278	struct mr_table *mrt, *next;
 279
 280	ASSERT_RTNL();
 281	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 282		list_del(&mrt->list);
 283		ip6mr_free_table(mrt);
 284	}
 285	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 286}
 287
 288static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
 289			    struct netlink_ext_ack *extack)
 290{
 291	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
 292}
 293
 294static unsigned int ip6mr_rules_seq_read(const struct net *net)
 295{
 296	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
 297}
 298
 299bool ip6mr_rule_default(const struct fib_rule *rule)
 300{
 301	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
 302	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
 303}
 304EXPORT_SYMBOL(ip6mr_rule_default);
 305#else
 306#define ip6mr_for_each_table(mrt, net) \
 307	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 308
 309static bool ip6mr_can_free_table(struct net *net)
 310{
 311	return !check_net(net);
 312}
 313
 314static struct mr_table *ip6mr_mr_table_iter(struct net *net,
 315					    struct mr_table *mrt)
 316{
 317	if (!mrt)
 318		return net->ipv6.mrt6;
 319	return NULL;
 320}
 321
 322static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 323{
 324	return net->ipv6.mrt6;
 325}
 326
 327#define __ip6mr_get_table ip6mr_get_table
 328
 329static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 330			    struct mr_table **mrt)
 331{
 332	*mrt = net->ipv6.mrt6;
 333	return 0;
 334}
 335
 336static int __net_init ip6mr_rules_init(struct net *net)
 337{
 338	struct mr_table *mrt;
 339
 340	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 341	if (IS_ERR(mrt))
 342		return PTR_ERR(mrt);
 343	net->ipv6.mrt6 = mrt;
 344	return 0;
 345}
 346
 347static void __net_exit ip6mr_rules_exit(struct net *net)
 348{
 349	ASSERT_RTNL();
 350	ip6mr_free_table(net->ipv6.mrt6);
 351	net->ipv6.mrt6 = NULL;
 352}
 353
 354static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
 355			    struct netlink_ext_ack *extack)
 356{
 357	return 0;
 358}
 359
 360static unsigned int ip6mr_rules_seq_read(const struct net *net)
 361{
 362	return 0;
 363}
 364#endif
 365
 366static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
 367			  const void *ptr)
 368{
 369	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
 370	struct mfc6_cache *c = (struct mfc6_cache *)ptr;
 371
 372	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
 373	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
 374}
 375
 376static const struct rhashtable_params ip6mr_rht_params = {
 377	.head_offset = offsetof(struct mr_mfc, mnode),
 378	.key_offset = offsetof(struct mfc6_cache, cmparg),
 379	.key_len = sizeof(struct mfc6_cache_cmp_arg),
 380	.nelem_hint = 3,
 381	.obj_cmpfn = ip6mr_hash_cmp,
 382	.automatic_shrinking = true,
 383};
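/*
 * Note the memcmp()-like convention of ip6mr_hash_cmp() above: zero means
 * "keys match", non-zero means "differ", which is what rhashtable's
 * obj_cmpfn expects.  The (S,G) key check as a plain, standalone sketch:
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

struct sg_key {
	struct in6_addr origin;		/* S */
	struct in6_addr mcastgrp;	/* G */
};

static int sg_key_cmp(const struct sg_key *a, const struct sg_key *b)
{
	/* 0 = match, non-zero = differ */
	return memcmp(&a->origin, &b->origin, sizeof(a->origin)) ||
	       memcmp(&a->mcastgrp, &b->mcastgrp, sizeof(a->mcastgrp));
}

int main(void)
{
	struct sg_key a, b;

	memset(&a, 0, sizeof(a));
	b = a;
	printf("identical keys compare as %d\n", sg_key_cmp(&a, &b));
	return 0;
}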
 384
 385static void ip6mr_new_table_set(struct mr_table *mrt,
 386				struct net *net)
 387{
 388#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 389	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
 390#endif
 391}
 392
 393static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
 394	.mf6c_origin = IN6ADDR_ANY_INIT,
 395	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
 396};
 397
 398static struct mr_table_ops ip6mr_mr_table_ops = {
 399	.rht_params = &ip6mr_rht_params,
 400	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
 401};
 402
 403static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
 404{
 405	struct mr_table *mrt;
 406
 407	mrt = __ip6mr_get_table(net, id);
 408	if (mrt)
 409		return mrt;
 410
 411	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
 412			      ipmr_expire_process, ip6mr_new_table_set);
 413}
 414
 415static void ip6mr_free_table(struct mr_table *mrt)
 416{
 417	struct net *net = read_pnet(&mrt->net);
 418
 419	WARN_ON_ONCE(!ip6mr_can_free_table(net));
 420
 421	timer_shutdown_sync(&mrt->ipmr_expire_timer);
 422	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
 423				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
 424	rhltable_destroy(&mrt->mfc_hash);
 425	kfree(mrt);
 426}
 427
 428#ifdef CONFIG_PROC_FS
 429/* The /proc interfaces to multicast routing
 430 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 431 */
 432
 433static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 434	__acquires(RCU)
 435{
 436	struct mr_vif_iter *iter = seq->private;
 437	struct net *net = seq_file_net(seq);
 438	struct mr_table *mrt;
 439
 440	rcu_read_lock();
 441	mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
 442	if (!mrt) {
 443		rcu_read_unlock();
 444		return ERR_PTR(-ENOENT);
 445	}
 446
 447	iter->mrt = mrt;
 448
 449	return mr_vif_seq_start(seq, pos);
 450}
 451
 452static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 453	__releases(RCU)
 454{
 455	rcu_read_unlock();
 456}
 457
 458static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 459{
 460	struct mr_vif_iter *iter = seq->private;
 461	struct mr_table *mrt = iter->mrt;
 462
 463	if (v == SEQ_START_TOKEN) {
 464		seq_puts(seq,
 465			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
 466	} else {
 467		const struct vif_device *vif = v;
 468		const struct net_device *vif_dev;
 469		const char *name;
 470
 471		vif_dev = vif_dev_read(vif);
 472		name = vif_dev ? vif_dev->name : "none";
 473
 474		seq_printf(seq,
 475			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
 476			   vif - mrt->vif_table,
 477			   name, vif->bytes_in, vif->pkt_in,
 478			   vif->bytes_out, vif->pkt_out,
 479			   vif->flags);
 480	}
 481	return 0;
 482}
 483
 484static const struct seq_operations ip6mr_vif_seq_ops = {
 485	.start = ip6mr_vif_seq_start,
 486	.next  = mr_vif_seq_next,
 487	.stop  = ip6mr_vif_seq_stop,
 488	.show  = ip6mr_vif_seq_show,
 489};
 490
 491static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 492{
 493	struct net *net = seq_file_net(seq);
 494	struct mr_table *mrt;
 495
 496	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 497	if (!mrt)
 498		return ERR_PTR(-ENOENT);
 499
 500	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
 501}
 502
 503static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 504{
 505	int n;
 506
 507	if (v == SEQ_START_TOKEN) {
 508		seq_puts(seq,
 509			 "Group                            "
 510			 "Origin                           "
 511			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
 512	} else {
 513		const struct mfc6_cache *mfc = v;
 514		const struct mr_mfc_iter *it = seq->private;
 515		struct mr_table *mrt = it->mrt;
 516
 517		seq_printf(seq, "%pI6 %pI6 %-3hd",
 518			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 519			   mfc->_c.mfc_parent);
 520
 521		if (it->cache != &mrt->mfc_unres_queue) {
 522			seq_printf(seq, " %8lu %8lu %8lu",
 523				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
 524				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
 525				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
 526			for (n = mfc->_c.mfc_un.res.minvif;
 527			     n < mfc->_c.mfc_un.res.maxvif; n++) {
 528				if (VIF_EXISTS(mrt, n) &&
 529				    mfc->_c.mfc_un.res.ttls[n] < 255)
 530					seq_printf(seq,
 531						   " %2d:%-3d", n,
 532						   mfc->_c.mfc_un.res.ttls[n]);
 533			}
 534		} else {
 535			/* unresolved mfc_caches don't contain
 536			 * pkt, bytes and wrong_if values
 537			 */
 538			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
 539		}
 540		seq_putc(seq, '\n');
 541	}
 542	return 0;
 543}
 544
 545static const struct seq_operations ipmr_mfc_seq_ops = {
 546	.start = ipmr_mfc_seq_start,
 547	.next  = mr_mfc_seq_next,
 548	.stop  = mr_mfc_seq_stop,
 549	.show  = ipmr_mfc_seq_show,
 550};
 551#endif
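/*
 * These seq_file ops back /proc/net/ip6_mr_vif and /proc/net/ip6_mr_cache,
 * so the tables can be inspected without any netlink code.  A trivial
 * reader:
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/ip6_mr_vif", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}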
 552
 553#ifdef CONFIG_IPV6_PIMSM_V2
 554
 555static int pim6_rcv(struct sk_buff *skb)
 556{
 557	struct pimreghdr *pim;
 558	struct ipv6hdr   *encap;
 559	struct net_device  *reg_dev = NULL;
 560	struct net *net = dev_net(skb->dev);
 561	struct mr_table *mrt;
 562	struct flowi6 fl6 = {
 563		.flowi6_iif	= skb->dev->ifindex,
 564		.flowi6_mark	= skb->mark,
 565	};
 566	int reg_vif_num;
 567
 568	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 569		goto drop;
 570
 571	pim = (struct pimreghdr *)skb_transport_header(skb);
 572	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
 573	    (pim->flags & PIM_NULL_REGISTER) ||
 574	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 575			     sizeof(*pim), IPPROTO_PIM,
 576			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 577	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 578		goto drop;
 579
 580	/* check if the inner packet is destined to a multicast group */
 581	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
 582				   sizeof(*pim));
 583
 584	if (!ipv6_addr_is_multicast(&encap->daddr) ||
 585	    encap->payload_len == 0 ||
 586	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
 587		goto drop;
 588
 589	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 590		goto drop;
 591
 592	/* Pairs with WRITE_ONCE() in mif6_add()/mif6_delete() */
 593	reg_vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
 594	if (reg_vif_num >= 0)
 595		reg_dev = vif_dev_read(&mrt->vif_table[reg_vif_num]);
 596
 597	if (!reg_dev)
 598		goto drop;
 599
 600	skb->mac_header = skb->network_header;
 601	skb_pull(skb, (u8 *)encap - skb->data);
 602	skb_reset_network_header(skb);
 603	skb->protocol = htons(ETH_P_IPV6);
 604	skb->ip_summed = CHECKSUM_NONE;
 605
 606	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
 607
 608	netif_rx(skb);
 609
 610	return 0;
 611 drop:
 612	kfree_skb(skb);
 613	return 0;
 614}
 615
 616static const struct inet6_protocol pim6_protocol = {
 617	.handler	=	pim6_rcv,
 618};
 619
 620/* Service routines creating virtual interfaces: PIMREG */
 621
 622static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 623				      struct net_device *dev)
 624{
 625	struct net *net = dev_net(dev);
 626	struct mr_table *mrt;
 627	struct flowi6 fl6 = {
 628		.flowi6_oif	= dev->ifindex,
 629		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 630		.flowi6_mark	= skb->mark,
 631	};
 632
 633	if (!pskb_inet_may_pull(skb))
 634		goto tx_err;
 635
 636	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 637		goto tx_err;
 638
 639	DEV_STATS_ADD(dev, tx_bytes, skb->len);
 640	DEV_STATS_INC(dev, tx_packets);
 641	rcu_read_lock();
 642	ip6mr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
 643			   MRT6MSG_WHOLEPKT);
 644	rcu_read_unlock();
 645	kfree_skb(skb);
 646	return NETDEV_TX_OK;
 647
 648tx_err:
 649	DEV_STATS_INC(dev, tx_errors);
 650	kfree_skb(skb);
 651	return NETDEV_TX_OK;
 652}
 653
 654static int reg_vif_get_iflink(const struct net_device *dev)
 655{
 656	return 0;
 657}
 658
 659static const struct net_device_ops reg_vif_netdev_ops = {
 660	.ndo_start_xmit	= reg_vif_xmit,
 661	.ndo_get_iflink = reg_vif_get_iflink,
 662};
 663
 664static void reg_vif_setup(struct net_device *dev)
 665{
 666	dev->type		= ARPHRD_PIMREG;
 667	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 668	dev->flags		= IFF_NOARP;
 669	dev->netdev_ops		= &reg_vif_netdev_ops;
 670	dev->needs_free_netdev	= true;
 671	dev->netns_local	= true;
 672}
 673
 674static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
 675{
 676	struct net_device *dev;
 677	char name[IFNAMSIZ];
 678
 679	if (mrt->id == RT6_TABLE_DFLT)
 680		sprintf(name, "pim6reg");
 681	else
 682		sprintf(name, "pim6reg%u", mrt->id);
 683
 684	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 685	if (!dev)
 686		return NULL;
 687
 688	dev_net_set(dev, net);
 689
 690	if (register_netdevice(dev)) {
 691		free_netdev(dev);
 692		return NULL;
 693	}
 694
 695	if (dev_open(dev, NULL))
 696		goto failure;
 697
 698	dev_hold(dev);
 699	return dev;
 700
 701failure:
 702	unregister_netdevice(dev);
 703	return NULL;
 704}
 705#endif
 706
 707static int call_ip6mr_vif_entry_notifiers(struct net *net,
 708					  enum fib_event_type event_type,
 709					  struct vif_device *vif,
 710					  struct net_device *vif_dev,
 711					  mifi_t vif_index, u32 tb_id)
 712{
 713	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
 714				     vif, vif_dev, vif_index, tb_id,
 715				     &net->ipv6.ipmr_seq);
 716}
 717
 718static int call_ip6mr_mfc_entry_notifiers(struct net *net,
 719					  enum fib_event_type event_type,
 720					  struct mfc6_cache *mfc, u32 tb_id)
 721{
 722	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
 723				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
 724}
 725
 726/* Delete a VIF entry */
 727static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
 728		       struct list_head *head)
 729{
 730	struct vif_device *v;
 731	struct net_device *dev;
 732	struct inet6_dev *in6_dev;
 733
 734	if (vifi < 0 || vifi >= mrt->maxvif)
 735		return -EADDRNOTAVAIL;
 736
 737	v = &mrt->vif_table[vifi];
 738
 739	dev = rtnl_dereference(v->dev);
 740	if (!dev)
 741		return -EADDRNOTAVAIL;
 742
 743	call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
 744				       FIB_EVENT_VIF_DEL, v, dev,
 745				       vifi, mrt->id);
 746	spin_lock(&mrt_lock);
 747	RCU_INIT_POINTER(v->dev, NULL);
 748
 749#ifdef CONFIG_IPV6_PIMSM_V2
 750	if (vifi == mrt->mroute_reg_vif_num) {
 751		/* Pairs with READ_ONCE() in ip6mr_cache_report() and reg_vif_xmit() */
 752		WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
 753	}
 754#endif
 755
 756	if (vifi + 1 == mrt->maxvif) {
 757		int tmp;
 758		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 759			if (VIF_EXISTS(mrt, tmp))
 760				break;
 761		}
 762		WRITE_ONCE(mrt->maxvif, tmp + 1);
 763	}
 764
 765	spin_unlock(&mrt_lock);
 766
 767	dev_set_allmulti(dev, -1);
 768
 769	in6_dev = __in6_dev_get(dev);
 770	if (in6_dev) {
 771		atomic_dec(&in6_dev->cnf.mc_forwarding);
 772		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
 773					     NETCONFA_MC_FORWARDING,
 774					     dev->ifindex, &in6_dev->cnf);
 775	}
 776
 777	if ((v->flags & MIFF_REGISTER) && !notify)
 778		unregister_netdevice_queue(dev, head);
 779
 780	netdev_put(dev, &v->dev_tracker);
 781	return 0;
 782}
 783
 784static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
 785{
 786	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
 787
 788	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
 789}
 790
 791static inline void ip6mr_cache_free(struct mfc6_cache *c)
 792{
 793	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
 794}
 795
 796/* Destroy an unresolved cache entry, killing queued skbs
 797   and reporting error to netlink readers.
 798 */
 799
 800static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
 801{
 802	struct net *net = read_pnet(&mrt->net);
 803	struct sk_buff *skb;
 804
 805	atomic_dec(&mrt->cache_resolve_queue_len);
 806
 807	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
 808		if (ipv6_hdr(skb)->version == 0) {
 809			struct nlmsghdr *nlh = skb_pull(skb,
 810							sizeof(struct ipv6hdr));
 811			nlh->nlmsg_type = NLMSG_ERROR;
 812			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
 813			skb_trim(skb, nlh->nlmsg_len);
 814			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
 815			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 816		} else
 817			kfree_skb(skb);
 818	}
 819
 820	ip6mr_cache_free(c);
 821}
 822
 823
 824	/* Timer handler for the unresolved queue. */
 825
 826static void ipmr_do_expire_process(struct mr_table *mrt)
 827{
 828	unsigned long now = jiffies;
 829	unsigned long expires = 10 * HZ;
 830	struct mr_mfc *c, *next;
 831
 832	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
 833		if (time_after(c->mfc_un.unres.expires, now)) {
 834			/* not yet... */
 835			unsigned long interval = c->mfc_un.unres.expires - now;
 836			if (interval < expires)
 837				expires = interval;
 838			continue;
 839		}
 840
 841		list_del(&c->list);
 842		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
 843		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
 844	}
 845
 846	if (!list_empty(&mrt->mfc_unres_queue))
 847		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 848}
 849
 850static void ipmr_expire_process(struct timer_list *t)
 851{
 852	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
 853
 854	if (!spin_trylock(&mfc_unres_lock)) {
 855		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 856		return;
 857	}
 858
 859	if (!list_empty(&mrt->mfc_unres_queue))
 860		ipmr_do_expire_process(mrt);
 861
 862	spin_unlock(&mfc_unres_lock);
 863}
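/*
 * The pattern above - try the lock from timer context and, on contention,
 * re-arm one tick later rather than spin - can be sketched with POSIX
 * primitives.  rearm_timer() and expire_entries() are hypothetical
 * helpers standing in for mod_timer() and the expiry walk:
 */
#include <pthread.h>

extern void rearm_timer(unsigned long delay_ticks);	/* hypothetical */
extern void expire_entries(void);			/* hypothetical */

static pthread_mutex_t unres_lock = PTHREAD_MUTEX_INITIALIZER;

void expire_timer_cb(void)
{
	if (pthread_mutex_trylock(&unres_lock) != 0) {
		rearm_timer(1);		/* contended: retry one tick later */
		return;
	}
	expire_entries();
	pthread_mutex_unlock(&unres_lock);
}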
 864
 865	/* Fill the oifs list. Called with mrt_lock held. */
 866
 867static void ip6mr_update_thresholds(struct mr_table *mrt,
 868				    struct mr_mfc *cache,
 869				    unsigned char *ttls)
 870{
 871	int vifi;
 872
 873	cache->mfc_un.res.minvif = MAXMIFS;
 874	cache->mfc_un.res.maxvif = 0;
 875	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 876
 877	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
 878		if (VIF_EXISTS(mrt, vifi) &&
 879		    ttls[vifi] && ttls[vifi] < 255) {
 880			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 881			if (cache->mfc_un.res.minvif > vifi)
 882				cache->mfc_un.res.minvif = vifi;
 883			if (cache->mfc_un.res.maxvif <= vifi)
 884				cache->mfc_un.res.maxvif = vifi + 1;
 885		}
 886	}
 887	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
 888}
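/*
 * The minvif/maxvif window computed above lets the forwarding loop visit
 * only the populated slice of ttls[].  The same reduction as a standalone
 * program (demo data; MAXMIFS comes from <linux/mroute6.h>):
 */
#include <stdio.h>
#include <linux/mroute6.h>

int main(void)
{
	unsigned char ttls[MAXMIFS] = { [2] = 1, [5] = 64 };
	int minvif = MAXMIFS, maxvif = 0, vifi;

	for (vifi = 0; vifi < MAXMIFS; vifi++) {
		if (ttls[vifi] && ttls[vifi] < 255) {
			if (minvif > vifi)
				minvif = vifi;
			if (maxvif <= vifi)
				maxvif = vifi + 1;
		}
	}
	printf("scan window: [%d, %d)\n", minvif, maxvif);	/* [2, 6) */
	return 0;
}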
 889
 890static int mif6_add(struct net *net, struct mr_table *mrt,
 891		    struct mif6ctl *vifc, int mrtsock)
 892{
 893	int vifi = vifc->mif6c_mifi;
 894	struct vif_device *v = &mrt->vif_table[vifi];
 895	struct net_device *dev;
 896	struct inet6_dev *in6_dev;
 897	int err;
 898
 899	/* Is the vif busy? */
 900	if (VIF_EXISTS(mrt, vifi))
 901		return -EADDRINUSE;
 902
 903	switch (vifc->mif6c_flags) {
 904#ifdef CONFIG_IPV6_PIMSM_V2
 905	case MIFF_REGISTER:
 906		/*
 907		 * Special Purpose VIF in PIM
 908		 * All the packets will be sent to the daemon
 909		 */
 910		if (mrt->mroute_reg_vif_num >= 0)
 911			return -EADDRINUSE;
 912		dev = ip6mr_reg_vif(net, mrt);
 913		if (!dev)
 914			return -ENOBUFS;
 915		err = dev_set_allmulti(dev, 1);
 916		if (err) {
 917			unregister_netdevice(dev);
 918			dev_put(dev);
 919			return err;
 920		}
 921		break;
 922#endif
 923	case 0:
 924		dev = dev_get_by_index(net, vifc->mif6c_pifi);
 925		if (!dev)
 926			return -EADDRNOTAVAIL;
 927		err = dev_set_allmulti(dev, 1);
 928		if (err) {
 929			dev_put(dev);
 930			return err;
 931		}
 932		break;
 933	default:
 934		return -EINVAL;
 935	}
 936
 937	in6_dev = __in6_dev_get(dev);
 938	if (in6_dev) {
 939		atomic_inc(&in6_dev->cnf.mc_forwarding);
 940		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
 941					     NETCONFA_MC_FORWARDING,
 942					     dev->ifindex, &in6_dev->cnf);
 943	}
 944
 945	/* Fill in the VIF structures */
 946	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
 947			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
 948			MIFF_REGISTER);
 949
 950	/* And finish update writing critical data */
 951	spin_lock(&mrt_lock);
 952	rcu_assign_pointer(v->dev, dev);
 953	netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
 954#ifdef CONFIG_IPV6_PIMSM_V2
 955	if (v->flags & MIFF_REGISTER)
 956		WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
 957#endif
 958	if (vifi + 1 > mrt->maxvif)
 959		WRITE_ONCE(mrt->maxvif, vifi + 1);
 960	spin_unlock(&mrt_lock);
 961	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
 962				       v, dev, vifi, mrt->id);
 963	return 0;
 964}
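/*
 * The userspace half of mif6_add(): the routing daemon claims the mroute
 * socket with MRT6_INIT and then registers each interface as a MIF.  A
 * sketch assuming an interface named "eth0" exists; the MIF slot number
 * is arbitrary here:
 */
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int open_mroute_socket_with_mif(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int on = 1;
	struct mif6ctl mc;

	memset(&mc, 0, sizeof(mc));
	mc.mif6c_mifi = 0;			/* MIF slot 0 */
	mc.mif6c_pifi = if_nametoindex("eth0");	/* physical ifindex */
	mc.vifc_threshold = 1;			/* minimum TTL to forward */

	if (fd < 0 ||
	    setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on)) < 0 ||
	    setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc)) < 0)
		return -1;
	return fd;	/* closing it tears the multicast tables down */
}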
 965
 966static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
 967					   const struct in6_addr *origin,
 968					   const struct in6_addr *mcastgrp)
 969{
 970	struct mfc6_cache_cmp_arg arg = {
 971		.mf6c_origin = *origin,
 972		.mf6c_mcastgrp = *mcastgrp,
 973	};
 974
 975	return mr_mfc_find(mrt, &arg);
 976}
 977
 978/* Look for a (*,G) entry */
 979static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
 980					       struct in6_addr *mcastgrp,
 981					       mifi_t mifi)
 982{
 983	struct mfc6_cache_cmp_arg arg = {
 984		.mf6c_origin = in6addr_any,
 985		.mf6c_mcastgrp = *mcastgrp,
 986	};
 987
 988	if (ipv6_addr_any(mcastgrp))
 989		return mr_mfc_find_any_parent(mrt, mifi);
 990	return mr_mfc_find_any(mrt, mifi, &arg);
 991}
 992
 993/* Look for a (S,G,iif) entry if parent != -1 */
 994static struct mfc6_cache *
 995ip6mr_cache_find_parent(struct mr_table *mrt,
 996			const struct in6_addr *origin,
 997			const struct in6_addr *mcastgrp,
 998			int parent)
 999{
1000	struct mfc6_cache_cmp_arg arg = {
1001		.mf6c_origin = *origin,
1002		.mf6c_mcastgrp = *mcastgrp,
1003	};
1004
1005	return mr_mfc_find_parent(mrt, &arg, parent);
1006}
1007
1008/* Allocate a multicast cache entry */
1009static struct mfc6_cache *ip6mr_cache_alloc(void)
1010{
1011	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1012	if (!c)
1013		return NULL;
1014	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1015	c->_c.mfc_un.res.minvif = MAXMIFS;
1016	c->_c.free = ip6mr_cache_free_rcu;
1017	refcount_set(&c->_c.mfc_un.res.refcount, 1);
1018	return c;
1019}
1020
1021static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1022{
1023	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1024	if (!c)
1025		return NULL;
1026	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
1027	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
1028	return c;
1029}
1030
1031/*
1032 *	A cache entry has moved from the unresolved queue to the resolved state
1033 */
1034
1035static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1036				struct mfc6_cache *uc, struct mfc6_cache *c)
1037{
1038	struct sk_buff *skb;
1039
1040	/*
1041	 *	Play the pending entries through our router
1042	 */
1043
1044	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1045		if (ipv6_hdr(skb)->version == 0) {
1046			struct nlmsghdr *nlh = skb_pull(skb,
1047							sizeof(struct ipv6hdr));
1048
1049			if (mr_fill_mroute(mrt, skb, &c->_c,
1050					   nlmsg_data(nlh)) > 0) {
1051				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1052			} else {
1053				nlh->nlmsg_type = NLMSG_ERROR;
1054				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1055				skb_trim(skb, nlh->nlmsg_len);
1056				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1057			}
1058			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1059		} else {
1060			rcu_read_lock();
1061			ip6_mr_forward(net, mrt, skb->dev, skb, c);
1062			rcu_read_unlock();
1063		}
1064	}
1065}
1066
1067/*
1068 *	Bounce a cache query up to pim6sd and netlink.
1069 *
1070 *	Called under rcu_read_lock()
1071 */
1072
1073static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
1074			      mifi_t mifi, int assert)
1075{
1076	struct sock *mroute6_sk;
1077	struct sk_buff *skb;
1078	struct mrt6msg *msg;
1079	int ret;
1080
1081#ifdef CONFIG_IPV6_PIMSM_V2
1082	if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE)
1083		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1084						+sizeof(*msg));
1085	else
1086#endif
1087		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1088
1089	if (!skb)
1090		return -ENOBUFS;
1091
1092	/* I suppose that internal messages
1093	 * do not require checksums */
1094
1095	skb->ip_summed = CHECKSUM_UNNECESSARY;
1096
1097#ifdef CONFIG_IPV6_PIMSM_V2
1098	if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE) {
1099		/* Ugly, but we have no choice with this interface.
1100		   Duplicate old header, fix length etc.
1101		   And all this only to mangle msg->im6_msgtype and
1102		   to set msg->im6_mbz to "mbz" :-)
1103		 */
1104		__skb_pull(skb, skb_network_offset(pkt));
1105
1106		skb_push(skb, sizeof(*msg));
1107		skb_reset_transport_header(skb);
1108		msg = (struct mrt6msg *)skb_transport_header(skb);
1109		msg->im6_mbz = 0;
1110		msg->im6_msgtype = assert;
1111		if (assert == MRT6MSG_WRMIFWHOLE)
1112			msg->im6_mif = mifi;
1113		else
1114			msg->im6_mif = READ_ONCE(mrt->mroute_reg_vif_num);
1115		msg->im6_pad = 0;
1116		msg->im6_src = ipv6_hdr(pkt)->saddr;
1117		msg->im6_dst = ipv6_hdr(pkt)->daddr;
1118
1119		skb->ip_summed = CHECKSUM_UNNECESSARY;
1120	} else
1121#endif
1122	{
1123	/*
1124	 *	Copy the IP header
1125	 */
1126
1127	skb_put(skb, sizeof(struct ipv6hdr));
1128	skb_reset_network_header(skb);
1129	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1130
1131	/*
1132	 *	Add our header
1133	 */
1134	skb_put(skb, sizeof(*msg));
1135	skb_reset_transport_header(skb);
1136	msg = (struct mrt6msg *)skb_transport_header(skb);
1137
1138	msg->im6_mbz = 0;
1139	msg->im6_msgtype = assert;
1140	msg->im6_mif = mifi;
1141	msg->im6_pad = 0;
1142	msg->im6_src = ipv6_hdr(pkt)->saddr;
1143	msg->im6_dst = ipv6_hdr(pkt)->daddr;
1144
1145	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1146	skb->ip_summed = CHECKSUM_UNNECESSARY;
1147	}
1148
1149	mroute6_sk = rcu_dereference(mrt->mroute_sk);
1150	if (!mroute6_sk) {
1151		kfree_skb(skb);
1152		return -EINVAL;
1153	}
1154
1155	mrt6msg_netlink_event(mrt, skb);
1156
1157	/* Deliver to user space multicast routing algorithms */
1158	ret = sock_queue_rcv_skb(mroute6_sk, skb);
1159
1160	if (ret < 0) {
1161		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1162		kfree_skb(skb);
1163	}
1164
1165	return ret;
1166}
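/*
 * On the receiving end of ip6mr_cache_report(), the daemon simply read()s
 * its mroute socket: upcalls arrive as a struct mrt6msg, and im6_mbz == 0
 * tells them apart from ordinary ICMPv6 traffic on the same raw socket.
 * A sketch of the read loop:
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/mroute6.h>

void upcall_loop(int mroute_fd)
{
	char buf[8192];

	for (;;) {
		ssize_t n = read(mroute_fd, buf, sizeof(buf));
		const struct mrt6msg *msg = (const struct mrt6msg *)buf;

		if (n < (ssize_t)sizeof(*msg) || msg->im6_mbz != 0)
			continue;	/* not an upcall */
		if (msg->im6_msgtype == MRT6MSG_NOCACHE)
			printf("no MFC entry, arrived on mif %u; resolve it\n",
			       (unsigned int)msg->im6_mif);
	}
}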
1167
1168/* Queue a packet for resolution; the entry is created and the skb queued under mfc_unres_lock. */
1169static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1170				  struct sk_buff *skb, struct net_device *dev)
1171{
1172	struct mfc6_cache *c;
1173	bool found = false;
1174	int err;
1175
1176	spin_lock_bh(&mfc_unres_lock);
1177	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1178		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1179		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1180			found = true;
1181			break;
1182		}
1183	}
1184
1185	if (!found) {
1186		/*
1187		 *	Create a new entry if allowable
1188		 */
1189
1190		c = ip6mr_cache_alloc_unres();
1191		if (!c) {
1192			spin_unlock_bh(&mfc_unres_lock);
1193
1194			kfree_skb(skb);
1195			return -ENOBUFS;
1196		}
1197
1198		/* Fill in the new cache entry */
1199		c->_c.mfc_parent = -1;
1200		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1201		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1202
1203		/*
1204		 *	Reflect first query at pim6sd
1205		 */
1206		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1207		if (err < 0) {
1208			/* If the report failed throw the cache entry
1209			   out - Brad Parker
1210			 */
1211			spin_unlock_bh(&mfc_unres_lock);
1212
1213			ip6mr_cache_free(c);
1214			kfree_skb(skb);
1215			return err;
1216		}
1217
1218		atomic_inc(&mrt->cache_resolve_queue_len);
1219		list_add(&c->_c.list, &mrt->mfc_unres_queue);
1220		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1221
1222		ipmr_do_expire_process(mrt);
1223	}
1224
1225	/* See if we can append the packet */
1226	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1227		kfree_skb(skb);
1228		err = -ENOBUFS;
1229	} else {
1230		if (dev) {
1231			skb->dev = dev;
1232			skb->skb_iif = dev->ifindex;
1233		}
1234		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1235		err = 0;
1236	}
1237
1238	spin_unlock_bh(&mfc_unres_lock);
1239	return err;
1240}
1241
1242/*
1243 *	MFC6 cache manipulation by user space
1244 */
1245
1246static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1247			    int parent)
1248{
1249	struct mfc6_cache *c;
1250
1251	/* The entries are added/deleted only under RTNL */
1252	rcu_read_lock();
1253	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1254				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1255	rcu_read_unlock();
1256	if (!c)
1257		return -ENOENT;
1258	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1259	list_del_rcu(&c->_c.list);
1260
1261	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1262				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
1263	mr6_netlink_event(mrt, c, RTM_DELROUTE);
1264	mr_cache_put(&c->_c);
1265	return 0;
1266}
1267
1268static int ip6mr_device_event(struct notifier_block *this,
1269			      unsigned long event, void *ptr)
1270{
1271	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1272	struct net *net = dev_net(dev);
1273	struct mr_table *mrt;
1274	struct vif_device *v;
1275	int ct;
1276
1277	if (event != NETDEV_UNREGISTER)
1278		return NOTIFY_DONE;
1279
1280	ip6mr_for_each_table(mrt, net) {
1281		v = &mrt->vif_table[0];
1282		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1283			if (rcu_access_pointer(v->dev) == dev)
1284				mif6_delete(mrt, ct, 1, NULL);
1285		}
1286	}
1287
1288	return NOTIFY_DONE;
1289}
1290
1291static unsigned int ip6mr_seq_read(const struct net *net)
1292{
1293	return READ_ONCE(net->ipv6.ipmr_seq) + ip6mr_rules_seq_read(net);
1294}
1295
1296static int ip6mr_dump(struct net *net, struct notifier_block *nb,
1297		      struct netlink_ext_ack *extack)
1298{
1299	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1300		       ip6mr_mr_table_iter, extack);
1301}
1302
1303static struct notifier_block ip6_mr_notifier = {
1304	.notifier_call = ip6mr_device_event
1305};
1306
1307static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1308	.family		= RTNL_FAMILY_IP6MR,
1309	.fib_seq_read	= ip6mr_seq_read,
1310	.fib_dump	= ip6mr_dump,
1311	.owner		= THIS_MODULE,
1312};
1313
1314static int __net_init ip6mr_notifier_init(struct net *net)
1315{
1316	struct fib_notifier_ops *ops;
1317
1318	net->ipv6.ipmr_seq = 0;
1319
1320	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1321	if (IS_ERR(ops))
1322		return PTR_ERR(ops);
1323
1324	net->ipv6.ip6mr_notifier_ops = ops;
1325
1326	return 0;
1327}
1328
1329static void __net_exit ip6mr_notifier_exit(struct net *net)
1330{
1331	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1332	net->ipv6.ip6mr_notifier_ops = NULL;
1333}
1334
1335/* Setup for IP multicast routing */
1336static int __net_init ip6mr_net_init(struct net *net)
1337{
1338	int err;
1339
1340	err = ip6mr_notifier_init(net);
1341	if (err)
1342		return err;
1343
1344	err = ip6mr_rules_init(net);
1345	if (err < 0)
1346		goto ip6mr_rules_fail;
1347
1348#ifdef CONFIG_PROC_FS
1349	err = -ENOMEM;
1350	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
1351			sizeof(struct mr_vif_iter)))
1352		goto proc_vif_fail;
1353	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
1354			sizeof(struct mr_mfc_iter)))
1355		goto proc_cache_fail;
1356#endif
1357
1358	return 0;
1359
1360#ifdef CONFIG_PROC_FS
1361proc_cache_fail:
1362	remove_proc_entry("ip6_mr_vif", net->proc_net);
1363proc_vif_fail:
1364	rtnl_lock();
1365	ip6mr_rules_exit(net);
1366	rtnl_unlock();
1367#endif
1368ip6mr_rules_fail:
1369	ip6mr_notifier_exit(net);
1370	return err;
1371}
1372
1373static void __net_exit ip6mr_net_exit(struct net *net)
1374{
1375#ifdef CONFIG_PROC_FS
1376	remove_proc_entry("ip6_mr_cache", net->proc_net);
1377	remove_proc_entry("ip6_mr_vif", net->proc_net);
1378#endif
1379	ip6mr_notifier_exit(net);
1380}
1381
1382static void __net_exit ip6mr_net_exit_batch(struct list_head *net_list)
1383{
1384	struct net *net;
1385
1386	rtnl_lock();
1387	list_for_each_entry(net, net_list, exit_list)
1388		ip6mr_rules_exit(net);
1389	rtnl_unlock();
1390}
1391
1392static struct pernet_operations ip6mr_net_ops = {
1393	.init = ip6mr_net_init,
1394	.exit = ip6mr_net_exit,
1395	.exit_batch = ip6mr_net_exit_batch,
1396};
1397
1398static const struct rtnl_msg_handler ip6mr_rtnl_msg_handlers[] __initconst_or_module = {
1399	{.owner = THIS_MODULE, .protocol = RTNL_FAMILY_IP6MR,
1400	 .msgtype = RTM_GETROUTE,
1401	 .doit = ip6mr_rtm_getroute, .dumpit = ip6mr_rtm_dumproute},
1402};
1403
1404int __init ip6_mr_init(void)
1405{
1406	int err;
1407
1408	mrt_cachep = KMEM_CACHE(mfc6_cache, SLAB_HWCACHE_ALIGN);
1409	if (!mrt_cachep)
1410		return -ENOMEM;
1411
1412	err = register_pernet_subsys(&ip6mr_net_ops);
1413	if (err)
1414		goto reg_pernet_fail;
1415
1416	err = register_netdevice_notifier(&ip6_mr_notifier);
1417	if (err)
1418		goto reg_notif_fail;
1419#ifdef CONFIG_IPV6_PIMSM_V2
1420	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1421		pr_err("%s: can't add PIM protocol\n", __func__);
1422		err = -EAGAIN;
1423		goto add_proto_fail;
1424	}
1425#endif
1426	err = rtnl_register_many(ip6mr_rtnl_msg_handlers);
1427	if (!err)
1428		return 0;
1429
1430#ifdef CONFIG_IPV6_PIMSM_V2
1431	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1432add_proto_fail:
1433	unregister_netdevice_notifier(&ip6_mr_notifier);
1434#endif
1435reg_notif_fail:
1436	unregister_pernet_subsys(&ip6mr_net_ops);
1437reg_pernet_fail:
1438	kmem_cache_destroy(mrt_cachep);
1439	return err;
1440}
1441
1442void __init ip6_mr_cleanup(void)
1443{
1444	rtnl_unregister_many(ip6mr_rtnl_msg_handlers);
1445#ifdef CONFIG_IPV6_PIMSM_V2
1446	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1447#endif
1448	unregister_netdevice_notifier(&ip6_mr_notifier);
1449	unregister_pernet_subsys(&ip6mr_net_ops);
1450	kmem_cache_destroy(mrt_cachep);
1451}
1452
1453static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1454			 struct mf6cctl *mfc, int mrtsock, int parent)
1455{
1456	unsigned char ttls[MAXMIFS];
1457	struct mfc6_cache *uc, *c;
1458	struct mr_mfc *_uc;
1459	bool found;
1460	int i, err;
1461
1462	if (mfc->mf6cc_parent >= MAXMIFS)
1463		return -ENFILE;
1464
1465	memset(ttls, 255, MAXMIFS);
1466	for (i = 0; i < MAXMIFS; i++) {
1467		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1468			ttls[i] = 1;
1469	}
1470
1471	/* The entries are added/deleted only under RTNL */
1472	rcu_read_lock();
1473	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1474				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1475	rcu_read_unlock();
1476	if (c) {
1477		spin_lock(&mrt_lock);
1478		c->_c.mfc_parent = mfc->mf6cc_parent;
1479		ip6mr_update_thresholds(mrt, &c->_c, ttls);
1480		if (!mrtsock)
1481			c->_c.mfc_flags |= MFC_STATIC;
1482		spin_unlock(&mrt_lock);
1483		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1484					       c, mrt->id);
1485		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1486		return 0;
1487	}
1488
1489	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1490	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1491		return -EINVAL;
1492
1493	c = ip6mr_cache_alloc();
1494	if (!c)
1495		return -ENOMEM;
1496
1497	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1498	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1499	c->_c.mfc_parent = mfc->mf6cc_parent;
1500	ip6mr_update_thresholds(mrt, &c->_c, ttls);
1501	if (!mrtsock)
1502		c->_c.mfc_flags |= MFC_STATIC;
1503
1504	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1505				  ip6mr_rht_params);
1506	if (err) {
1507		pr_err("ip6mr: rhtable insert error %d\n", err);
1508		ip6mr_cache_free(c);
1509		return err;
1510	}
1511	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1512
1513	/* Check to see if we resolved a queued (unresolved) entry. If so,
1514	 * we need to play out the queued frames and tidy up.
1515	 */
1516	found = false;
1517	spin_lock_bh(&mfc_unres_lock);
1518	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1519		uc = (struct mfc6_cache *)_uc;
1520		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1521		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1522			list_del(&_uc->list);
1523			atomic_dec(&mrt->cache_resolve_queue_len);
1524			found = true;
1525			break;
1526		}
1527	}
1528	if (list_empty(&mrt->mfc_unres_queue))
1529		del_timer(&mrt->ipmr_expire_timer);
1530	spin_unlock_bh(&mfc_unres_lock);
1531
1532	if (found) {
1533		ip6mr_cache_resolve(net, mrt, uc, c);
1534		ip6mr_cache_free(uc);
1535	}
1536	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1537				       c, mrt->id);
1538	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1539	return 0;
1540}
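/*
 * The userspace call that lands in ip6mr_mfc_add(): MRT6_ADD_MFC installs
 * an (S,G) entry with its incoming MIF and the set of outgoing MIFs.  A
 * sketch with made-up addresses and MIF numbers, issued on the socket
 * returned by an earlier MRT6_INIT:
 */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int add_sg_route(int mroute_fd)
{
	struct mf6cctl mfc;

	memset(&mfc, 0, sizeof(mfc));
	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
	inet_pton(AF_INET6, "ff3e::4321", &mfc.mf6cc_mcastgrp.sin6_addr);
	mfc.mf6cc_parent = 0;			/* iif: MIF 0 */
	IF_SET(1, &mfc.mf6cc_ifset);		/* oif: MIF 1 */

	return setsockopt(mroute_fd, IPPROTO_IPV6, MRT6_ADD_MFC,
			  &mfc, sizeof(mfc));
}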
1541
1542/*
1543 *	Close the multicast socket, and clear the vif tables etc
1544 */
1545
1546static void mroute_clean_tables(struct mr_table *mrt, int flags)
1547{
1548	struct mr_mfc *c, *tmp;
1549	LIST_HEAD(list);
1550	int i;
1551
1552	/* Shut down all active vif entries */
1553	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
1554		for (i = 0; i < mrt->maxvif; i++) {
1555			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1556			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
1557			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
1558				continue;
1559			mif6_delete(mrt, i, 0, &list);
1560		}
1561		unregister_netdevice_many(&list);
1562	}
1563
1564	/* Wipe the cache */
1565	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
1566		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1567			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
1568			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
1569				continue;
1570			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1571			list_del_rcu(&c->list);
1572			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1573						       FIB_EVENT_ENTRY_DEL,
1574						       (struct mfc6_cache *)c, mrt->id);
1575			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1576			mr_cache_put(c);
1577		}
1578	}
1579
1580	if (flags & MRT6_FLUSH_MFC) {
1581		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1582			spin_lock_bh(&mfc_unres_lock);
1583			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1584				list_del(&c->list);
1585				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1586						  RTM_DELROUTE);
1587				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1588			}
1589			spin_unlock_bh(&mfc_unres_lock);
1590		}
1591	}
1592}
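
/* Illustrative only: userspace reaches mroute_clean_tables() through the
 * MRT6_FLUSH socket option handled below; the flags select what is
 * wiped. A sketch of a full flush, including static entries:
 *
 *	int flags = MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
 *		    MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC;
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_FLUSH, &flags, sizeof(flags));
 */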
1593
1594static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1595{
1596	int err = 0;
1597	struct net *net = sock_net(sk);
1598
1599	rtnl_lock();
1600	spin_lock(&mrt_lock);
1601	if (rtnl_dereference(mrt->mroute_sk)) {
1602		err = -EADDRINUSE;
1603	} else {
1604		rcu_assign_pointer(mrt->mroute_sk, sk);
1605		sock_set_flag(sk, SOCK_RCU_FREE);
1606		atomic_inc(&net->ipv6.devconf_all->mc_forwarding);
1607	}
1608	spin_unlock(&mrt_lock);
1609
1610	if (!err)
1611		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1612					     NETCONFA_MC_FORWARDING,
1613					     NETCONFA_IFINDEX_ALL,
1614					     net->ipv6.devconf_all);
1615	rtnl_unlock();
1616
1617	return err;
1618}
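
/* Illustrative only: ip6mr_sk_init() runs when a daemon such as pim6sd
 * enables multicast routing on its control socket. A minimal sketch,
 * assuming the caller has CAP_NET_ADMIN:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int v = 1;
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
 */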
1619
1620int ip6mr_sk_done(struct sock *sk)
1621{
1622	struct net *net = sock_net(sk);
1623	struct ipv6_devconf *devconf;
1624	struct mr_table *mrt;
1625	int err = -EACCES;
1626
1627	if (sk->sk_type != SOCK_RAW ||
1628	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1629		return err;
1630
1631	devconf = net->ipv6.devconf_all;
1632	if (!devconf || !atomic_read(&devconf->mc_forwarding))
1633		return err;
1634
1635	rtnl_lock();
1636	ip6mr_for_each_table(mrt, net) {
1637		if (sk == rtnl_dereference(mrt->mroute_sk)) {
1638			spin_lock(&mrt_lock);
1639			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1640			/* Note that mroute_sk had SOCK_RCU_FREE set,
1641			 * so the RCU grace period before sk freeing
1642			 * is guaranteed by sk_destruct()
1643			 */
1644			atomic_dec(&devconf->mc_forwarding);
1645			spin_unlock(&mrt_lock);
1646			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1647						     NETCONFA_MC_FORWARDING,
1648						     NETCONFA_IFINDEX_ALL,
1649						     net->ipv6.devconf_all);
1650
1651			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
1652			err = 0;
1653			break;
1654		}
1655	}
1656	rtnl_unlock();
1657
1658	return err;
1659}
1660
1661bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1662{
1663	struct mr_table *mrt;
1664	struct flowi6 fl6 = {
1665		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
1666		.flowi6_oif	= skb->dev->ifindex,
1667		.flowi6_mark	= skb->mark,
1668	};
1669
1670	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1671		return false;
1672
1673	return rcu_access_pointer(mrt->mroute_sk) != NULL;
1674}
1675EXPORT_SYMBOL(mroute6_is_socket);
1676
1677/*
1678 *	Socket options and virtual interface manipulation. The whole
1679 *	virtual interface system is a complete heap, but unfortunately
1680 *	that's how BSD mrouted happens to think. Maybe one day with a proper
1681 *	MOSPF/PIM router set up we can clean this up.
1682 */
1683
1684int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
1685			  unsigned int optlen)
1686{
1687	int ret, parent = 0;
1688	struct mif6ctl vif;
1689	struct mf6cctl mfc;
1690	mifi_t mifi;
1691	struct net *net = sock_net(sk);
1692	struct mr_table *mrt;
1693
1694	if (sk->sk_type != SOCK_RAW ||
1695	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1696		return -EOPNOTSUPP;
1697
1698	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1699	if (!mrt)
1700		return -ENOENT;
1701
1702	if (optname != MRT6_INIT) {
1703		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1704		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
1705			return -EACCES;
1706	}
1707
1708	switch (optname) {
1709	case MRT6_INIT:
1710		if (optlen < sizeof(int))
1711			return -EINVAL;
1712
1713		return ip6mr_sk_init(mrt, sk);
1714
1715	case MRT6_DONE:
1716		return ip6mr_sk_done(sk);
1717
1718	case MRT6_ADD_MIF:
1719		if (optlen < sizeof(vif))
1720			return -EINVAL;
1721		if (copy_from_sockptr(&vif, optval, sizeof(vif)))
1722			return -EFAULT;
1723		if (vif.mif6c_mifi >= MAXMIFS)
1724			return -ENFILE;
1725		rtnl_lock();
1726		ret = mif6_add(net, mrt, &vif,
1727			       sk == rtnl_dereference(mrt->mroute_sk));
1728		rtnl_unlock();
1729		return ret;
1730
1731	case MRT6_DEL_MIF:
1732		if (optlen < sizeof(mifi_t))
1733			return -EINVAL;
1734		if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
1735			return -EFAULT;
1736		rtnl_lock();
1737		ret = mif6_delete(mrt, mifi, 0, NULL);
1738		rtnl_unlock();
1739		return ret;
1740
1741	/*
1742	 *	Manipulate the forwarding caches. These live
1743	 *	in a sort of kernel/user symbiosis.
1744	 */
1745	case MRT6_ADD_MFC:
1746	case MRT6_DEL_MFC:
1747		parent = -1;
1748		fallthrough;
1749	case MRT6_ADD_MFC_PROXY:
1750	case MRT6_DEL_MFC_PROXY:
1751		if (optlen < sizeof(mfc))
1752			return -EINVAL;
1753		if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))
1754			return -EFAULT;
1755		if (parent == 0)
1756			parent = mfc.mf6cc_parent;
1757		rtnl_lock();
1758		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1759			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1760		else
1761			ret = ip6mr_mfc_add(net, mrt, &mfc,
1762					    sk ==
1763					    rtnl_dereference(mrt->mroute_sk),
1764					    parent);
1765		rtnl_unlock();
1766		return ret;
1767
1768	case MRT6_FLUSH:
1769	{
1770		int flags;
1771
1772		if (optlen != sizeof(flags))
1773			return -EINVAL;
1774		if (copy_from_sockptr(&flags, optval, sizeof(flags)))
1775			return -EFAULT;
1776		rtnl_lock();
1777		mroute_clean_tables(mrt, flags);
1778		rtnl_unlock();
1779		return 0;
1780	}
1781
1782	/*
1783	 *	Control PIM assert (activating PIM also activates assert)
1784	 */
1785	case MRT6_ASSERT:
1786	{
1787		int v;
1788
1789		if (optlen != sizeof(v))
1790			return -EINVAL;
1791		if (copy_from_sockptr(&v, optval, sizeof(v)))
1792			return -EFAULT;
1793		mrt->mroute_do_assert = v;
1794		return 0;
1795	}
1796
1797#ifdef CONFIG_IPV6_PIMSM_V2
1798	case MRT6_PIM:
1799	{
1800		bool do_wrmifwhole;
1801		int v;
1802
1803		if (optlen != sizeof(v))
1804			return -EINVAL;
1805		if (copy_from_sockptr(&v, optval, sizeof(v)))
1806			return -EFAULT;
1807
1808		do_wrmifwhole = (v == MRT6MSG_WRMIFWHOLE);
1809		v = !!v;
1810		rtnl_lock();
1811		ret = 0;
1812		if (v != mrt->mroute_do_pim) {
1813			mrt->mroute_do_pim = v;
1814			mrt->mroute_do_assert = v;
1815			mrt->mroute_do_wrvifwhole = do_wrmifwhole;
1816		}
1817		rtnl_unlock();
1818		return ret;
1819	}
1820
1821#endif
1822#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1823	case MRT6_TABLE:
1824	{
1825		u32 v;
1826
1827		if (optlen != sizeof(u32))
1828			return -EINVAL;
1829		if (copy_from_sockptr(&v, optval, sizeof(v)))
1830			return -EFAULT;
1831		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1832		if (v != RT_TABLE_DEFAULT && v >= 100000000)
1833			return -EINVAL;
1834		if (sk == rcu_access_pointer(mrt->mroute_sk))
1835			return -EBUSY;
1836
1837		rtnl_lock();
1838		ret = 0;
1839		mrt = ip6mr_new_table(net, v);
1840		if (IS_ERR(mrt))
1841			ret = PTR_ERR(mrt);
1842		else
1843			raw6_sk(sk)->ip6mr_table = v;
1844		rtnl_unlock();
1845		return ret;
1846	}
1847#endif
1848	/*
1849	 *	Spurious command, or MRT6_VERSION which you cannot
1850	 *	set.
1851	 */
1852	default:
1853		return -ENOPROTOOPT;
1854	}
1855}
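
/* Illustrative only: the MRT6_ADD_MIF case above consumes a struct
 * mif6ctl. A sketch where "ifindex" is an assumed physical interface:
 *
 *	struct mif6ctl vif;
 *
 *	memset(&vif, 0, sizeof(vif));
 *	vif.mif6c_mifi = 0;		// mif slot, must be < MAXMIFS
 *	vif.mif6c_pifi = ifindex;	// underlying interface
 *	vif.vifc_threshold = 1;		// minimum hop limit to forward
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &vif, sizeof(vif));
 */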
1856
1857/*
1858 *	Getsockopt support for the multicast routing system.
1859 */
1860
1861int ip6_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
1862			  sockptr_t optlen)
1863{
1864	int olr;
1865	int val;
1866	struct net *net = sock_net(sk);
1867	struct mr_table *mrt;
1868
1869	if (sk->sk_type != SOCK_RAW ||
1870	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1871		return -EOPNOTSUPP;
1872
1873	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1874	if (!mrt)
1875		return -ENOENT;
1876
1877	switch (optname) {
1878	case MRT6_VERSION:
1879		val = 0x0305;
1880		break;
1881#ifdef CONFIG_IPV6_PIMSM_V2
1882	case MRT6_PIM:
1883		val = mrt->mroute_do_pim;
1884		break;
1885#endif
1886	case MRT6_ASSERT:
1887		val = mrt->mroute_do_assert;
1888		break;
1889	default:
1890		return -ENOPROTOOPT;
1891	}
1892
1893	if (copy_from_sockptr(&olr, optlen, sizeof(int)))
1894		return -EFAULT;
1895
1896	olr = min_t(int, olr, sizeof(int));
1897	if (olr < 0)
1898		return -EINVAL;
1899
1900	if (copy_to_sockptr(optlen, &olr, sizeof(int)))
1901		return -EFAULT;
1902	if (copy_to_sockptr(optval, &val, olr))
1903		return -EFAULT;
1904	return 0;
1905}
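
/* Illustrative only: reading MRT6_VERSION back through the handler
 * above; 0x0305 is the value the kernel reports:
 *
 *	int ver;
 *	socklen_t len = sizeof(ver);
 *
 *	getsockopt(fd, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 */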
1906
1907/*
1908 *	The IPv6 multicast ioctl support routines.
1909 */
1910int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
1911{
1912	struct sioc_sg_req6 *sr;
1913	struct sioc_mif_req6 *vr;
1914	struct vif_device *vif;
1915	struct mfc6_cache *c;
1916	struct net *net = sock_net(sk);
1917	struct mr_table *mrt;
1918
1919	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1920	if (!mrt)
1921		return -ENOENT;
1922
1923	switch (cmd) {
1924	case SIOCGETMIFCNT_IN6:
1925		vr = (struct sioc_mif_req6 *)arg;
1926		if (vr->mifi >= mrt->maxvif)
1927			return -EINVAL;
1928		vr->mifi = array_index_nospec(vr->mifi, mrt->maxvif);
1929		rcu_read_lock();
1930		vif = &mrt->vif_table[vr->mifi];
1931		if (VIF_EXISTS(mrt, vr->mifi)) {
1932			vr->icount = READ_ONCE(vif->pkt_in);
1933			vr->ocount = READ_ONCE(vif->pkt_out);
1934			vr->ibytes = READ_ONCE(vif->bytes_in);
1935			vr->obytes = READ_ONCE(vif->bytes_out);
1936			rcu_read_unlock();
1937			return 0;
1938		}
1939		rcu_read_unlock();
1940		return -EADDRNOTAVAIL;
1941	case SIOCGETSGCNT_IN6:
1942		sr = (struct sioc_sg_req6 *)arg;
1943
1944		rcu_read_lock();
1945		c = ip6mr_cache_find(mrt, &sr->src.sin6_addr,
1946				     &sr->grp.sin6_addr);
1947		if (c) {
1948			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
1949			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
1950			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
1951			rcu_read_unlock();
1952			return 0;
1953		}
1954		rcu_read_unlock();
1955		return -EADDRNOTAVAIL;
1956	default:
1957		return -ENOIOCTLCMD;
1958	}
1959}
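
/* Illustrative only: the SIOCGETSGCNT_IN6 branch above exports per-(S,G)
 * counters. A userspace sketch:
 *
 *	struct sioc_sg_req6 sr;
 *
 *	memset(&sr, 0, sizeof(sr));
 *	sr.src.sin6_addr = src;
 *	sr.grp.sin6_addr = grp;
 *	if (ioctl(fd, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */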
1960
1961#ifdef CONFIG_COMPAT
1962struct compat_sioc_sg_req6 {
1963	struct sockaddr_in6 src;
1964	struct sockaddr_in6 grp;
1965	compat_ulong_t pktcnt;
1966	compat_ulong_t bytecnt;
1967	compat_ulong_t wrong_if;
1968};
1969
1970struct compat_sioc_mif_req6 {
1971	mifi_t	mifi;
1972	compat_ulong_t icount;
1973	compat_ulong_t ocount;
1974	compat_ulong_t ibytes;
1975	compat_ulong_t obytes;
1976};
1977
1978int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1979{
1980	struct compat_sioc_sg_req6 sr;
1981	struct compat_sioc_mif_req6 vr;
1982	struct vif_device *vif;
1983	struct mfc6_cache *c;
1984	struct net *net = sock_net(sk);
1985	struct mr_table *mrt;
1986
1987	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1988	if (!mrt)
1989		return -ENOENT;
1990
1991	switch (cmd) {
1992	case SIOCGETMIFCNT_IN6:
1993		if (copy_from_user(&vr, arg, sizeof(vr)))
1994			return -EFAULT;
1995		if (vr.mifi >= mrt->maxvif)
1996			return -EINVAL;
1997		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1998		rcu_read_lock();
1999		vif = &mrt->vif_table[vr.mifi];
2000		if (VIF_EXISTS(mrt, vr.mifi)) {
2001			vr.icount = READ_ONCE(vif->pkt_in);
2002			vr.ocount = READ_ONCE(vif->pkt_out);
2003			vr.ibytes = READ_ONCE(vif->bytes_in);
2004			vr.obytes = READ_ONCE(vif->bytes_out);
2005			rcu_read_unlock();
2006
2007			if (copy_to_user(arg, &vr, sizeof(vr)))
2008				return -EFAULT;
2009			return 0;
2010		}
2011		rcu_read_unlock();
2012		return -EADDRNOTAVAIL;
2013	case SIOCGETSGCNT_IN6:
2014		if (copy_from_user(&sr, arg, sizeof(sr)))
2015			return -EFAULT;
2016
2017		rcu_read_lock();
2018		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
2019		if (c) {
2020			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
2021			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
2022			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
2023			rcu_read_unlock();
2024
2025			if (copy_to_user(arg, &sr, sizeof(sr)))
2026				return -EFAULT;
2027			return 0;
2028		}
2029		rcu_read_unlock();
2030		return -EADDRNOTAVAIL;
2031	default:
2032		return -ENOIOCTLCMD;
2033	}
2034}
2035#endif
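
/* The compat path above exists because struct sioc_sg_req6 and struct
 * sioc_mif_req6 carry unsigned long counters, whose width differs
 * between 32-bit userspace and a 64-bit kernel; compat_ulong_t preserves
 * the layout 32-bit callers expect.
 */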
2036
2037static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2038{
2039	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2040		      IPSTATS_MIB_OUTFORWDATAGRAMS);
2041	return dst_output(net, sk, skb);
2042}
2043
2044/*
2045 *	Processing handlers for ip6mr_forward
2046 */
2047
2048static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
2049			  struct sk_buff *skb, int vifi)
2050{
2051	struct vif_device *vif = &mrt->vif_table[vifi];
2052	struct net_device *vif_dev;
2053	struct ipv6hdr *ipv6h;
2054	struct dst_entry *dst;
2055	struct flowi6 fl6;
2056
2057	vif_dev = vif_dev_read(vif);
2058	if (!vif_dev)
2059		goto out_free;
2060
2061#ifdef CONFIG_IPV6_PIMSM_V2
2062	if (vif->flags & MIFF_REGISTER) {
2063		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
2064		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
2065		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
2066		DEV_STATS_INC(vif_dev, tx_packets);
2067		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2068		goto out_free;
2069	}
2070#endif
2071
2072	ipv6h = ipv6_hdr(skb);
2073
2074	fl6 = (struct flowi6) {
2075		.flowi6_oif = vif->link,
2076		.daddr = ipv6h->daddr,
2077	};
2078
2079	dst = ip6_route_output(net, NULL, &fl6);
2080	if (dst->error) {
2081		dst_release(dst);
2082		goto out_free;
2083	}
2084
2085	skb_dst_drop(skb);
2086	skb_dst_set(skb, dst);
2087
2088	/*
2089	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
2090	 * not only before forwarding, but also after forwarding on all output
2091	 * interfaces. Clearly, if the mrouter runs a multicast program, that
2092	 * program should receive packets regardless of the interface on which
2093	 * it joined.
2094	 * If we did not do this, the program would have to join on all
2095	 * interfaces. On the other hand, a multihomed host (or router, but
2096	 * not an mrouter) cannot join on more than one interface - it would
2097	 * result in receiving multiple copies of each packet.
2098	 */
2099	skb->dev = vif_dev;
2100	WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
2101	WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
2102
2103	/* We are about to write */
2104	/* XXX: extension headers? */
2105	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(vif_dev)))
2106		goto out_free;
2107
2108	ipv6h = ipv6_hdr(skb);
2109	ipv6h->hop_limit--;
2110
2111	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2112
2113	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2114		       net, NULL, skb, skb->dev, vif_dev,
2115		       ip6mr_forward2_finish);
2116
2117out_free:
2118	kfree_skb(skb);
2119	return 0;
2120}
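
/* Note on the MIFF_REGISTER branch above: packets routed to a register
 * mif are never transmitted on a wire; they are handed to the daemon as
 * MRT6MSG_WHOLEPKT cache reports so that it can PIM Register-encapsulate
 * them toward the rendezvous point.
 */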
2121
2122/* Called with rcu_read_lock() */
2123static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2124{
2125	int ct;
2126
2127	/* Pairs with WRITE_ONCE() in mif6_delete()/mif6_add() */
2128	for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
2129		if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
2130			break;
2131	}
2132	return ct;
2133}
2134
2135/* Called under rcu_read_lock() */
2136static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2137			   struct net_device *dev, struct sk_buff *skb,
2138			   struct mfc6_cache *c)
2139{
2140	int psend = -1;
2141	int vif, ct;
2142	int true_vifi = ip6mr_find_vif(mrt, dev);
2143
2144	vif = c->_c.mfc_parent;
2145	atomic_long_inc(&c->_c.mfc_un.res.pkt);
2146	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
2147	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
2148
2149	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2150		struct mfc6_cache *cache_proxy;
2151
2152		/* For an (*,G) entry, we only check that the incoming
2153		 * interface is part of the static tree.
2154		 */
2155		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2156		if (cache_proxy &&
2157		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
2158			goto forward;
2159	}
2160
2161	/*
2162	 * Wrong interface: drop packet and (maybe) send PIM assert.
2163	 */
2164	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
2165		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
2166
2167		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2168		    /* PIM-SM uses asserts when switching from the RPT to the
2169		       SPT, so we cannot check that the packet arrived on an oif.
2170		       It is bad, but otherwise we would need to move a pretty
2171		       large chunk of pimd into the kernel. Ough... --ANK
2172		     */
2173		    (mrt->mroute_do_pim ||
2174		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2175		    time_after(jiffies,
2176			       c->_c.mfc_un.res.last_assert +
2177			       MFC_ASSERT_THRESH)) {
2178			c->_c.mfc_un.res.last_assert = jiffies;
2179			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2180			if (mrt->mroute_do_wrvifwhole)
2181				ip6mr_cache_report(mrt, skb, true_vifi,
2182						   MRT6MSG_WRMIFWHOLE);
2183		}
2184		goto dont_forward;
2185	}
2186
2187forward:
2188	WRITE_ONCE(mrt->vif_table[vif].pkt_in,
2189		   mrt->vif_table[vif].pkt_in + 1);
2190	WRITE_ONCE(mrt->vif_table[vif].bytes_in,
2191		   mrt->vif_table[vif].bytes_in + skb->len);
2192
2193	/*
2194	 *	Forward the frame
2195	 */
2196	if (ipv6_addr_any(&c->mf6c_origin) &&
2197	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
2198		if (true_vifi >= 0 &&
2199		    true_vifi != c->_c.mfc_parent &&
2200		    ipv6_hdr(skb)->hop_limit >
2201				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2202			/* It's an (*,*) entry and the packet is not coming from
2203			 * the upstream: forward the packet to the upstream
2204			 * only.
2205			 */
2206			psend = c->_c.mfc_parent;
2207			goto last_forward;
2208		}
2209		goto dont_forward;
2210	}
2211	for (ct = c->_c.mfc_un.res.maxvif - 1;
2212	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2213		/* For (*,G) entry, don't forward to the incoming interface */
2214		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2215		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2216			if (psend != -1) {
2217				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2218				if (skb2)
2219					ip6mr_forward2(net, mrt, skb2, psend);
2220			}
2221			psend = ct;
2222		}
2223	}
2224last_forward:
2225	if (psend != -1) {
2226		ip6mr_forward2(net, mrt, skb, psend);
2227		return;
2228	}
2229
2230dont_forward:
2231	kfree_skb(skb);
2232}
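
/* Worked note on the threshold test above: ip6mr_mfc_add() stores
 * ttls[i] = 1 for every mif present in mf6cc_ifset and 255 for the
 * rest, so "hop_limit > ttls[ct]" forwards to a selected mif only while
 * the packet still has hop_limit >= 2 and can never match an unselected
 * mif (hop_limit is at most 255). The last eligible mif receives the
 * original skb; earlier ones get clones.
 */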
2233
2234
2235/*
2236 *	Multicast packets for forwarding arrive here
2237 */
2238
2239int ip6_mr_input(struct sk_buff *skb)
2240{
2241	struct mfc6_cache *cache;
2242	struct net *net = dev_net(skb->dev);
2243	struct mr_table *mrt;
2244	struct flowi6 fl6 = {
2245		.flowi6_iif	= skb->dev->ifindex,
2246		.flowi6_mark	= skb->mark,
2247	};
2248	int err;
2249	struct net_device *dev;
2250
2251	/* skb->dev passed in is the master dev for vrfs.
2252	 * Get the proper interface that does have a vif associated with it.
2253	 */
2254	dev = skb->dev;
2255	if (netif_is_l3_master(skb->dev)) {
2256		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2257		if (!dev) {
2258			kfree_skb(skb);
2259			return -ENODEV;
2260		}
2261	}
2262
2263	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2264	if (err < 0) {
2265		kfree_skb(skb);
2266		return err;
2267	}
2268
2269	cache = ip6mr_cache_find(mrt,
2270				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2271	if (!cache) {
2272		int vif = ip6mr_find_vif(mrt, dev);
2273
2274		if (vif >= 0)
2275			cache = ip6mr_cache_find_any(mrt,
2276						     &ipv6_hdr(skb)->daddr,
2277						     vif);
2278	}
2279
2280	/*
2281	 *	No usable cache entry
2282	 */
2283	if (!cache) {
2284		int vif;
2285
2286		vif = ip6mr_find_vif(mrt, dev);
2287		if (vif >= 0) {
2288			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
2289
2290			return err;
2291		}
2292		kfree_skb(skb);
2293		return -ENODEV;
2294	}
2295
2296	ip6_mr_forward(net, mrt, dev, skb, cache);
2297
2298	return 0;
2299}
2300
2301int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2302		    u32 portid)
2303{
2304	int err;
2305	struct mr_table *mrt;
2306	struct mfc6_cache *cache;
2307	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
2308
2309	rcu_read_lock();
2310	mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
2311	if (!mrt) {
2312		rcu_read_unlock();
2313		return -ENOENT;
2314	}
2315
2316	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2317	if (!cache && skb->dev) {
2318		int vif = ip6mr_find_vif(mrt, skb->dev);
2319
2320		if (vif >= 0)
2321			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2322						     vif);
2323	}
2324
2325	if (!cache) {
2326		struct sk_buff *skb2;
2327		struct ipv6hdr *iph;
2328		struct net_device *dev;
2329		int vif;
2330
2331		dev = skb->dev;
2332		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2333			rcu_read_unlock();
2334			return -ENODEV;
2335		}
2336
2337		/* really correct? */
2338		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2339		if (!skb2) {
2340			rcu_read_unlock();
2341			return -ENOMEM;
2342		}
2343
2344		NETLINK_CB(skb2).portid = portid;
2345		skb_reset_transport_header(skb2);
2346
2347		skb_put(skb2, sizeof(struct ipv6hdr));
2348		skb_reset_network_header(skb2);
2349
2350		iph = ipv6_hdr(skb2);
2351		iph->version = 0;
2352		iph->priority = 0;
2353		iph->flow_lbl[0] = 0;
2354		iph->flow_lbl[1] = 0;
2355		iph->flow_lbl[2] = 0;
2356		iph->payload_len = 0;
2357		iph->nexthdr = IPPROTO_NONE;
2358		iph->hop_limit = 0;
2359		iph->saddr = rt->rt6i_src.addr;
2360		iph->daddr = rt->rt6i_dst.addr;
2361
2362		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
2363		rcu_read_unlock();
2364
2365		return err;
2366	}
2367
2368	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2369	rcu_read_unlock();
2370	return err;
2371}
2372
2373static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2374			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2375			     int flags)
2376{
2377	struct nlmsghdr *nlh;
2378	struct rtmsg *rtm;
2379	int err;
2380
2381	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2382	if (!nlh)
2383		return -EMSGSIZE;
2384
2385	rtm = nlmsg_data(nlh);
2386	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2387	rtm->rtm_dst_len  = 128;
2388	rtm->rtm_src_len  = 128;
2389	rtm->rtm_tos      = 0;
2390	rtm->rtm_table    = mrt->id;
2391	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2392		goto nla_put_failure;
2393	rtm->rtm_type = RTN_MULTICAST;
2394	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2395	if (c->_c.mfc_flags & MFC_STATIC)
2396		rtm->rtm_protocol = RTPROT_STATIC;
2397	else
2398		rtm->rtm_protocol = RTPROT_MROUTED;
2399	rtm->rtm_flags    = 0;
2400
2401	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2402	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2403		goto nla_put_failure;
2404	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2405	/* do not break the dump if cache is unresolved */
2406	if (err < 0 && err != -ENOENT)
2407		goto nla_put_failure;
2408
2409	nlmsg_end(skb, nlh);
2410	return 0;
2411
2412nla_put_failure:
2413	nlmsg_cancel(skb, nlh);
2414	return -EMSGSIZE;
2415}
2416
2417static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2418			      u32 portid, u32 seq, struct mr_mfc *c,
2419			      int cmd, int flags)
2420{
2421	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2422				 cmd, flags);
2423}
2424
2425static int mr6_msgsize(bool unresolved, int maxvif)
2426{
2427	size_t len =
2428		NLMSG_ALIGN(sizeof(struct rtmsg))
2429		+ nla_total_size(4)	/* RTA_TABLE */
2430		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2431		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2432		;
2433
2434	if (!unresolved)
2435		len = len
2436		      + nla_total_size(4)	/* RTA_IIF */
2437		      + nla_total_size(0)	/* RTA_MULTIPATH */
2438		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2439						/* RTA_MFC_STATS */
2440		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2441		;
2442
2443	return len;
2444}
2445
2446static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2447			      int cmd)
2448{
2449	struct net *net = read_pnet(&mrt->net);
2450	struct sk_buff *skb;
2451	int err = -ENOBUFS;
2452
2453	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2454			GFP_ATOMIC);
2455	if (!skb)
2456		goto errout;
2457
2458	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2459	if (err < 0)
2460		goto errout;
2461
2462	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2463	return;
2464
2465errout:
2466	kfree_skb(skb);
2467	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2468}
2469
2470static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2471{
2472	size_t len =
2473		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2474		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2475		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2476					/* IP6MRA_CREPORT_SRC_ADDR */
2477		+ nla_total_size(sizeof(struct in6_addr))
2478					/* IP6MRA_CREPORT_DST_ADDR */
2479		+ nla_total_size(sizeof(struct in6_addr))
2480					/* IP6MRA_CREPORT_PKT */
2481		+ nla_total_size(payloadlen)
2482		;
2483
2484	return len;
2485}
2486
2487static void mrt6msg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
2488{
2489	struct net *net = read_pnet(&mrt->net);
2490	struct nlmsghdr *nlh;
2491	struct rtgenmsg *rtgenm;
2492	struct mrt6msg *msg;
2493	struct sk_buff *skb;
2494	struct nlattr *nla;
2495	int payloadlen;
2496
2497	payloadlen = pkt->len - sizeof(struct mrt6msg);
2498	msg = (struct mrt6msg *)skb_transport_header(pkt);
2499
2500	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2501	if (!skb)
2502		goto errout;
2503
2504	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2505			sizeof(struct rtgenmsg), 0);
2506	if (!nlh)
2507		goto errout;
2508	rtgenm = nlmsg_data(nlh);
2509	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2510	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2511	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2512	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2513			     &msg->im6_src) ||
2514	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2515			     &msg->im6_dst))
2516		goto nla_put_failure;
2517
2518	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2519	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2520				  nla_data(nla), payloadlen))
2521		goto nla_put_failure;
2522
2523	nlmsg_end(skb, nlh);
2524
2525	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2526	return;
2527
2528nla_put_failure:
2529	nlmsg_cancel(skb, nlh);
2530errout:
2531	kfree_skb(skb);
2532	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2533}
2534
2535static const struct nla_policy ip6mr_getroute_policy[RTA_MAX + 1] = {
2536	[RTA_SRC]		= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
2537	[RTA_DST]		= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
2538	[RTA_TABLE]		= { .type = NLA_U32 },
2539};
2540
2541static int ip6mr_rtm_valid_getroute_req(struct sk_buff *skb,
2542					const struct nlmsghdr *nlh,
2543					struct nlattr **tb,
2544					struct netlink_ext_ack *extack)
2545{
2546	struct rtmsg *rtm;
2547	int err;
2548
2549	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, ip6mr_getroute_policy,
2550			  extack);
2551	if (err)
2552		return err;
2553
2554	rtm = nlmsg_data(nlh);
2555	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
2556	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
2557	    rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
2558	    rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
2559		NL_SET_ERR_MSG_MOD(extack,
2560				   "Invalid values in header for multicast route get request");
2561		return -EINVAL;
2562	}
2563
2564	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2565	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2566		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
2567		return -EINVAL;
2568	}
2569
2570	return 0;
2571}
2572
2573static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2574			      struct netlink_ext_ack *extack)
2575{
2576	struct net *net = sock_net(in_skb->sk);
2577	struct in6_addr src = {}, grp = {};
2578	struct nlattr *tb[RTA_MAX + 1];
2579	struct mfc6_cache *cache;
2580	struct mr_table *mrt;
2581	struct sk_buff *skb;
2582	u32 tableid;
2583	int err;
2584
2585	err = ip6mr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
2586	if (err < 0)
2587		return err;
2588
2589	if (tb[RTA_SRC])
2590		src = nla_get_in6_addr(tb[RTA_SRC]);
2591	if (tb[RTA_DST])
2592		grp = nla_get_in6_addr(tb[RTA_DST]);
2593	tableid = nla_get_u32_default(tb[RTA_TABLE], 0);
2594
2595	mrt = __ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
2596	if (!mrt) {
2597		NL_SET_ERR_MSG_MOD(extack, "MR table does not exist");
2598		return -ENOENT;
2599	}
2600
2601	/* entries are added/deleted only under RTNL */
2602	rcu_read_lock();
2603	cache = ip6mr_cache_find(mrt, &src, &grp);
2604	rcu_read_unlock();
2605	if (!cache) {
2606		NL_SET_ERR_MSG_MOD(extack, "MR cache entry not found");
2607		return -ENOENT;
2608	}
2609
2610	skb = nlmsg_new(mr6_msgsize(false, mrt->maxvif), GFP_KERNEL);
2611	if (!skb)
2612		return -ENOBUFS;
2613
2614	err = ip6mr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2615				nlh->nlmsg_seq, cache, RTM_NEWROUTE, 0);
2616	if (err < 0) {
2617		kfree_skb(skb);
2618		return err;
2619	}
2620
2621	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2622}
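
/* Illustrative only: an RTM_GETROUTE request aimed at the handler above
 * carries a struct rtmsg with rtm_family = RTNL_FAMILY_IP6MR and
 * rtm_src_len = rtm_dst_len = 128, RTA_SRC/RTA_DST attributes holding
 * the source and group addresses, and optionally RTA_TABLE; all other
 * header fields must be zero, as enforced by
 * ip6mr_rtm_valid_getroute_req().
 */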
2623
2624static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2625{
2626	const struct nlmsghdr *nlh = cb->nlh;
2627	struct fib_dump_filter filter = {
2628		.rtnl_held = true,
2629	};
2630	int err;
2631
2632	if (cb->strict_check) {
2633		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
2634					    &filter, cb);
2635		if (err < 0)
2636			return err;
2637	}
2638
2639	if (filter.table_id) {
2640		struct mr_table *mrt;
2641
2642		mrt = __ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2643		if (!mrt) {
2644			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
2645				return skb->len;
2646
2647			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2648			return -ENOENT;
2649		}
2650		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2651				    &mfc_unres_lock, &filter);
2652		return skb->len ? : err;
2653	}
2654
2655	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
2656				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
2657}