/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>

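/*
 *	Multicast routing table state. One instance exists per policy
 *	routing table when CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is set,
 *	otherwise a single per-namespace table is used. All changes are
 *	serialized by rtnl_lock; see mrt_lock below for fast-path locking.
 */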
struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};

/* Big lock, protecting the vif table, mrt cache and mroute socket state.
   Note that changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and is protected by
   the weak rwlock mrt_lock. The queue of unresolved entries is
   protected by the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

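/*
 *	Select the multicast routing table for a flow via fib-rules.
 *	On success *mrt points at the matched table; a negative errno
 *	(e.g. -ENETUNREACH from an unreachable rule) is returned when
 *	no table matches.
 */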
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	struct ip6mr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
}
#endif

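/*
 *	Look up a table by id, creating and registering a fresh one
 *	(with its forwarding cache, unresolved queue and expiry timer
 *	initialised) if it does not exist yet. Returns NULL on OOM.
 */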
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


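/*
 *	Walk the MFC entries for /proc: first the resolved hash lines
 *	under mrt_lock, then the unresolved queue under mfc_unres_lock.
 *	The matching unlock happens in ipmr_mfc_seq_stop() once the
 *	iteration finishes.
 */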
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing:
 *	/proc/ip6_mr_cache and /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

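/*
 *	Handle an inbound PIM Register message: validate the PIM header
 *	and checksum, make sure the inner packet really is multicast,
 *	then decapsulate it and feed it back into the stack through the
 *	register pseudo-device of the owning table.
 */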
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

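/*
 *	Create and bring up the register pseudo-device ("pim6reg", or
 *	"pim6reg<id>" for non-default tables) used to deliver whole
 *	packets to the PIM daemon.
 */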
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

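/*
 *	Timer callback; runs in softirq context, so only trylock the
 *	queue lock and retry on the next jiffy if someone holds it.
 */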
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

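/*
 *	Add a virtual interface, either a real device put into allmulti
 *	mode or (with CONFIG_IPV6_PIMSM_V2) the special register vif.
 *	Called under rtnl_lock from the setsockopt path.
 */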
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is the vif busy? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special-purpose VIF in PIM;
		 * all the packets will be sent to the daemon.
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution; the packet is attached to a
 *	locked unresolved cache entry.
 */

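/*
 *	At most 10 unresolved entries may exist per table, and each
 *	entry buffers at most 4 packets; anything beyond that is
 *	dropped with -ENOBUFS.
 */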
static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, NULL);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

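/*
 *	Install or update an (origin, group) forwarding cache entry.
 *	If a matching entry was sitting on the unresolved queue, its
 *	pending packets are replayed through ip6mr_cache_resolve().
 */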
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;

	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(mrt, &mfc);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (activating PIM will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

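/*
 *	Transmit a packet on one outgoing vif: route it out via the
 *	vif's link, decrement the hop limit and hand it to netfilter's
 *	FORWARD hook. Register vifs instead bounce the whole packet up
 *	to the daemon.
 */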
static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst)
		goto out_free;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * program, that program should receive packets regardless of which
	 * interface it joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or a router,
	 * but not an mrouter) cannot join on more than one interface -
	 * it would result in receiving duplicate packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}

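/*
 *	Fan a packet out to every vif whose TTL threshold it satisfies.
 *	Packets arriving on an unexpected interface only update the
 *	wrong_if counter and may trigger a PIM assert report.
 */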
static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to
		       the SPT, so we cannot check that the packet arrived
		       on an oif. That is unfortunate, but otherwise we
		       would have to move a pretty large chunk of pimd
		       into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}


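/*
 *	Encode a cache entry into an rtnetlink message: RTA_IIF for the
 *	parent vif plus an RTA_MULTIPATH nexthop list, one nexthop per
 *	forwarding vif with its TTL threshold in rtnh_hops.
 */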
2092static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2093			       struct mfc6_cache *c, struct rtmsg *rtm)
2094{
2095	int ct;
2096	struct rtnexthop *nhp;
2097	u8 *b = skb_tail_pointer(skb);
2098	struct rtattr *mp_head;
2099
2100	/* If cache is unresolved, don't try to parse IIF and OIF */
2101	if (c->mf6c_parent >= MAXMIFS)
2102		return -ENOENT;
2103
2104	if (MIF_EXISTS(mrt, c->mf6c_parent))
2105		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
2106
2107	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2108
2109	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2110		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2111			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2112				goto rtattr_failure;
2113			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2114			nhp->rtnh_flags = 0;
2115			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2116			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2117			nhp->rtnh_len = sizeof(*nhp);
2118		}
2119	}
2120	mp_head->rta_type = RTA_MULTIPATH;
2121	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
2122	rtm->rtm_type = RTN_MULTICAST;
2123	return 1;
2124
2125rtattr_failure:
2126	nlmsg_trim(skb, b);
2127	return -EMSGSIZE;
2128}
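/* Note: each forwarding oif above is encoded as a struct rtnexthop
 * nested in an RTA_MULTIPATH attribute, with rtnh_hops reused to carry
 * the per-interface TTL threshold rather than a hop count.
 */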
2129
2130int ip6mr_get_route(struct net *net,
2131		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
2132{
2133	int err;
2134	struct mr6_table *mrt;
2135	struct mfc6_cache *cache;
2136	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2137
2138	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2139	if (mrt == NULL)
2140		return -ENOENT;
2141
2142	read_lock(&mrt_lock);
2143	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2144
2145	if (!cache) {
2146		struct sk_buff *skb2;
2147		struct ipv6hdr *iph;
2148		struct net_device *dev;
2149		int vif;
2150
2151		if (nowait) {
2152			read_unlock(&mrt_lock);
2153			return -EAGAIN;
2154		}
2155
2156		dev = skb->dev;
2157		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2158			read_unlock(&mrt_lock);
2159			return -ENODEV;
2160		}
2161
2162		/* really correct? */
2163		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2164		if (!skb2) {
2165			read_unlock(&mrt_lock);
2166			return -ENOMEM;
2167		}
2168
2169		skb_reset_transport_header(skb2);
2170
2171		skb_put(skb2, sizeof(struct ipv6hdr));
2172		skb_reset_network_header(skb2);
2173
2174		iph = ipv6_hdr(skb2);
2175		iph->version = 0;
2176		iph->priority = 0;
2177		iph->flow_lbl[0] = 0;
2178		iph->flow_lbl[1] = 0;
2179		iph->flow_lbl[2] = 0;
2180		iph->payload_len = 0;
2181		iph->nexthdr = IPPROTO_NONE;
2182		iph->hop_limit = 0;
2183		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
2184		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
2185
2186		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2187		read_unlock(&mrt_lock);
2188
2189		return err;
2190	}
2191
2192	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2193		cache->mfc_flags |= MFC_NOTIFY;
2194
2195	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2196	read_unlock(&mrt_lock);
2197	return err;
2198}
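/* Note: the skeleton header built above has iph->version = 0, which is
 * how skbs carrying a pending netlink request are later distinguished
 * from real IPv6 packets on the unresolved queue (see the
 * ipv6_hdr(skb)->version == 0 checks in the resolve/destroy paths).
 */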
2199
2200static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2201			     u32 pid, u32 seq, struct mfc6_cache *c)
2202{
2203	struct nlmsghdr *nlh;
2204	struct rtmsg *rtm;
2205
2206	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2207	if (nlh == NULL)
2208		return -EMSGSIZE;
2209
2210	rtm = nlmsg_data(nlh);
2211	rtm->rtm_family   = RTNL_FAMILY_IPMR;
2212	rtm->rtm_dst_len  = 128;
2213	rtm->rtm_src_len  = 128;
2214	rtm->rtm_tos      = 0;
2215	rtm->rtm_table    = mrt->id;
2216	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
2217	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2218	rtm->rtm_protocol = RTPROT_UNSPEC;
2219	rtm->rtm_flags    = 0;
2220
2221	NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
2222	NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
2223
2224	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2225		goto nla_put_failure;
2226
2227	return nlmsg_end(skb, nlh);
2228
2229nla_put_failure:
2230	nlmsg_cancel(skb, nlh);
2231	return -EMSGSIZE;
2232}
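/* Note: NLA_PUT() and NLA_PUT_U32() above are the old netlink helper
 * macros that jump to the local nla_put_failure label when the skb runs
 * out of tailroom; nlmsg_cancel() then rewinds everything added since
 * nlmsg_put().
 */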
2233
2234static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2235{
2236	struct net *net = sock_net(skb->sk);
2237	struct mr6_table *mrt;
2238	struct mfc6_cache *mfc;
2239	unsigned int t = 0, s_t;
2240	unsigned int h = 0, s_h;
2241	unsigned int e = 0, s_e;
2242
2243	s_t = cb->args[0];
2244	s_h = cb->args[1];
2245	s_e = cb->args[2];
2246
2247	read_lock(&mrt_lock);
2248	ip6mr_for_each_table(mrt, net) {
2249		if (t < s_t)
2250			goto next_table;
2251		if (t > s_t)
2252			s_h = 0;
2253		for (h = s_h; h < MFC6_LINES; h++) {
2254			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2255				if (e < s_e)
2256					goto next_entry;
2257				if (ip6mr_fill_mroute(mrt, skb,
2258						      NETLINK_CB(cb->skb).pid,
2259						      cb->nlh->nlmsg_seq,
2260						      mfc) < 0)
2261					goto done;
2262next_entry:
2263				e++;
2264			}
2265			e = s_e = 0;
2266		}
2267		s_h = 0;
2268next_table:
2269		t++;
2270	}
2271done:
2272	read_unlock(&mrt_lock);
2273
2274	cb->args[2] = e;
2275	cb->args[1] = h;
2276	cb->args[0] = t;
2277
2278	return skb->len;
2279}
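/* Note: the table/hash-line/entry indexes saved in cb->args[] above act
 * as the continuation cookie for multi-part netlink dumps; the next
 * callback invocation resumes the walk from exactly that position.
 */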
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux IPv6 multicast routing support for BSD pim6sd
   4 *	Based on net/ipv4/ipmr.c.
   5 *
   6 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
   7 *		LSIIT Laboratory, Strasbourg, France
   8 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
   9 *		6WIND, Paris, France
  10 *	Copyright (C)2007,2008 USAGI/WIDE Project
  11 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  12 */
  13
  14#include <linux/uaccess.h>
  15#include <linux/types.h>
  16#include <linux/sched.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/kernel.h>
  20#include <linux/fcntl.h>
  21#include <linux/stat.h>
  22#include <linux/socket.h>
  23#include <linux/inet.h>
  24#include <linux/netdevice.h>
  25#include <linux/inetdevice.h>
  26#include <linux/proc_fs.h>
  27#include <linux/seq_file.h>
  28#include <linux/init.h>
  29#include <linux/compat.h>
  30#include <linux/rhashtable.h>
  31#include <net/protocol.h>
  32#include <linux/skbuff.h>
  33#include <net/raw.h>
  34#include <linux/notifier.h>
  35#include <linux/if_arp.h>
  36#include <net/checksum.h>
  37#include <net/netlink.h>
  38#include <net/fib_rules.h>
  39
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <linux/mroute6.h>
  43#include <linux/pim.h>
  44#include <net/addrconf.h>
  45#include <linux/netfilter_ipv6.h>
  46#include <linux/export.h>
  47#include <net/ip6_checksum.h>
  48#include <linux/netconf.h>
  49#include <net/ip_tunnels.h>
  50
  51#include <linux/nospec.h>
  52
  53struct ip6mr_rule {
  54	struct fib_rule		common;
  55};
  56
  57struct ip6mr_result {
  58	struct mr_table	*mrt;
  59};
  60
  61/* Big lock, protecting vif table, mrt cache and mroute socket state.
  62   Note that the changes are semaphored via rtnl_lock.
  63 */
  64
  65static DEFINE_RWLOCK(mrt_lock);
  66
  67/* Multicast router control variables */
  68
  69/* Special spinlock for queue of unresolved entries */
  70static DEFINE_SPINLOCK(mfc_unres_lock);
  71
  72/* We return to original Alan's scheme. Hash table of resolved
  73   entries is changed only in process context and protected
  74   with weak lock mrt_lock. Queue of unresolved entries is protected
  75   with strong spinlock mfc_unres_lock.
  76
  77   In this case data path is free of exclusive locks at all.
  78 */
  79
  80static struct kmem_cache *mrt_cachep __read_mostly;
  81
  82static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
  83static void ip6mr_free_table(struct mr_table *mrt);
  84
  85static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
  86			   struct net_device *dev, struct sk_buff *skb,
  87			   struct mfc6_cache *cache);
  88static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
  89			      mifi_t mifi, int assert);
  90static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
  91			      int cmd);
  92static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
  93static int ip6mr_rtm_dumproute(struct sk_buff *skb,
  94			       struct netlink_callback *cb);
  95static void mroute_clean_tables(struct mr_table *mrt, int flags);
  96static void ipmr_expire_process(struct timer_list *t);
  97
  98#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
  99#define ip6mr_for_each_table(mrt, net) \
 100	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 101
 102static struct mr_table *ip6mr_mr_table_iter(struct net *net,
 103					    struct mr_table *mrt)
 104{
 105	struct mr_table *ret;
 106
 107	if (!mrt)
 108		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
 109				     struct mr_table, list);
 110	else
 111		ret = list_entry_rcu(mrt->list.next,
 112				     struct mr_table, list);
 113
 114	if (&ret->list == &net->ipv6.mr6_tables)
 115		return NULL;
 116	return ret;
 117}
 118
 119static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 120{
 121	struct mr_table *mrt;
 122
 123	ip6mr_for_each_table(mrt, net) {
 124		if (mrt->id == id)
 125			return mrt;
 126	}
 127	return NULL;
 128}
 129
 130static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 131			    struct mr_table **mrt)
 132{
 133	int err;
 134	struct ip6mr_result res;
 135	struct fib_lookup_arg arg = {
 136		.result = &res,
 137		.flags = FIB_LOOKUP_NOREF,
 138	};
 139
 140	/* update flow if oif or iif point to device enslaved to l3mdev */
 141	l3mdev_update_flow(net, flowi6_to_flowi(flp6));
 142
 143	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 144			       flowi6_to_flowi(flp6), 0, &arg);
 145	if (err < 0)
 146		return err;
 147	*mrt = res.mrt;
 148	return 0;
 149}
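/* Note: FIB_LOOKUP_NOREF above tells fib_rules_lookup() not to take a
 * reference on the matched rule; only the mr_table pointer resolved in
 * ip6mr_rule_action() is carried back via the result.
 */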
 150
 151static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 152			     int flags, struct fib_lookup_arg *arg)
 153{
 154	struct ip6mr_result *res = arg->result;
 155	struct mr_table *mrt;
 156
 157	switch (rule->action) {
 158	case FR_ACT_TO_TBL:
 159		break;
 160	case FR_ACT_UNREACHABLE:
 161		return -ENETUNREACH;
 162	case FR_ACT_PROHIBIT:
 163		return -EACCES;
 164	case FR_ACT_BLACKHOLE:
 165	default:
 166		return -EINVAL;
 167	}
 168
 169	arg->table = fib_rule_get_table(rule, arg);
 170
 171	mrt = ip6mr_get_table(rule->fr_net, arg->table);
 172	if (!mrt)
 173		return -EAGAIN;
 174	res->mrt = mrt;
 175	return 0;
 176}
 177
 178static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
 179{
 180	return 1;
 181}
 182
 183static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
 184	FRA_GENERIC_POLICY,
 185};
 186
 187static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 188				struct fib_rule_hdr *frh, struct nlattr **tb,
 189				struct netlink_ext_ack *extack)
 190{
 191	return 0;
 192}
 193
 194static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 195			      struct nlattr **tb)
 196{
 197	return 1;
 198}
 199
 200static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 201			   struct fib_rule_hdr *frh)
 202{
 203	frh->dst_len = 0;
 204	frh->src_len = 0;
 205	frh->tos     = 0;
 206	return 0;
 207}
 208
 209static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
 210	.family		= RTNL_FAMILY_IP6MR,
 211	.rule_size	= sizeof(struct ip6mr_rule),
 212	.addr_size	= sizeof(struct in6_addr),
 213	.action		= ip6mr_rule_action,
 214	.match		= ip6mr_rule_match,
 215	.configure	= ip6mr_rule_configure,
 216	.compare	= ip6mr_rule_compare,
 217	.fill		= ip6mr_rule_fill,
 218	.nlgroup	= RTNLGRP_IPV6_RULE,
 219	.policy		= ip6mr_rule_policy,
 220	.owner		= THIS_MODULE,
 221};
 222
 223static int __net_init ip6mr_rules_init(struct net *net)
 224{
 225	struct fib_rules_ops *ops;
 226	struct mr_table *mrt;
 227	int err;
 228
 229	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
 230	if (IS_ERR(ops))
 231		return PTR_ERR(ops);
 232
 233	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 234
 235	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 236	if (IS_ERR(mrt)) {
 237		err = PTR_ERR(mrt);
 238		goto err1;
 239	}
 240
 241	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
 242	if (err < 0)
 243		goto err2;
 244
 245	net->ipv6.mr6_rules_ops = ops;
 246	return 0;
 247
 248err2:
 249	ip6mr_free_table(mrt);
 250err1:
 251	fib_rules_unregister(ops);
 252	return err;
 253}
 254
 255static void __net_exit ip6mr_rules_exit(struct net *net)
 256{
 257	struct mr_table *mrt, *next;
 258
 259	rtnl_lock();
 260	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 261		list_del(&mrt->list);
 262		ip6mr_free_table(mrt);
 263	}
 264	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 265	rtnl_unlock();
 266}
 267
 268static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
 269{
 270	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
 271}
 272
 273static unsigned int ip6mr_rules_seq_read(struct net *net)
 274{
 275	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
 276}
 277
 278bool ip6mr_rule_default(const struct fib_rule *rule)
 279{
 280	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
 281	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
 282}
 283EXPORT_SYMBOL(ip6mr_rule_default);
 284#else
 285#define ip6mr_for_each_table(mrt, net) \
 286	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 287
 288static struct mr_table *ip6mr_mr_table_iter(struct net *net,
 289					    struct mr_table *mrt)
 290{
 291	if (!mrt)
 292		return net->ipv6.mrt6;
 293	return NULL;
 294}
 295
 296static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 297{
 298	return net->ipv6.mrt6;
 299}
 300
 301static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 302			    struct mr_table **mrt)
 303{
 304	*mrt = net->ipv6.mrt6;
 305	return 0;
 306}
 307
 308static int __net_init ip6mr_rules_init(struct net *net)
 309{
 310	struct mr_table *mrt;
 311
 312	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 313	if (IS_ERR(mrt))
 314		return PTR_ERR(mrt);
 315	net->ipv6.mrt6 = mrt;
 316	return 0;
 317}
 318
 319static void __net_exit ip6mr_rules_exit(struct net *net)
 320{
 321	rtnl_lock();
 322	ip6mr_free_table(net->ipv6.mrt6);
 323	net->ipv6.mrt6 = NULL;
 324	rtnl_unlock();
 325}
 326
 327static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
 328{
 329	return 0;
 330}
 331
 332static unsigned int ip6mr_rules_seq_read(struct net *net)
 333{
 334	return 0;
 335}
 336#endif
 337
 338static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
 339			  const void *ptr)
 340{
 341	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
 342	struct mfc6_cache *c = (struct mfc6_cache *)ptr;
 343
 344	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
 345	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
 346}
 347
 348static const struct rhashtable_params ip6mr_rht_params = {
 349	.head_offset = offsetof(struct mr_mfc, mnode),
 350	.key_offset = offsetof(struct mfc6_cache, cmparg),
 351	.key_len = sizeof(struct mfc6_cache_cmp_arg),
 352	.nelem_hint = 3,
 353	.obj_cmpfn = ip6mr_hash_cmp,
 354	.automatic_shrinking = true,
 355};
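/* Note: the MFC hash is keyed on the (origin, mcastgrp) pair, and the
 * compare callback above returns 0 on a match per the rhashtable
 * obj_cmpfn convention, hence the negated ipv6_addr_equal() tests.
 */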
 356
 357static void ip6mr_new_table_set(struct mr_table *mrt,
 358				struct net *net)
 359{
 360#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 361	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
 362#endif
 363}
 364
 365static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
 366	.mf6c_origin = IN6ADDR_ANY_INIT,
 367	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
 368};
 369
 370static struct mr_table_ops ip6mr_mr_table_ops = {
 371	.rht_params = &ip6mr_rht_params,
 372	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
 373};
 374
 375static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
 376{
 377	struct mr_table *mrt;
 378
 379	mrt = ip6mr_get_table(net, id);
 380	if (mrt)
 381		return mrt;
 382
 383	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
 384			      ipmr_expire_process, ip6mr_new_table_set);
 385}
 386
 387static void ip6mr_free_table(struct mr_table *mrt)
 388{
 389	del_timer_sync(&mrt->ipmr_expire_timer);
 390	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
 391				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
 392	rhltable_destroy(&mrt->mfc_hash);
 393	kfree(mrt);
 394}
 395
 396#ifdef CONFIG_PROC_FS
 397/* The /proc interfaces to multicast routing
 398 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 399 */
 400
 401static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 402	__acquires(mrt_lock)
 403{
 404	struct mr_vif_iter *iter = seq->private;
 405	struct net *net = seq_file_net(seq);
 406	struct mr_table *mrt;
 407
 408	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 409	if (!mrt)
 410		return ERR_PTR(-ENOENT);
 411
 412	iter->mrt = mrt;
 413
 414	read_lock(&mrt_lock);
 415	return mr_vif_seq_start(seq, pos);
 416}
 417
 418static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 419	__releases(mrt_lock)
 420{
 421	read_unlock(&mrt_lock);
 422}
 423
 424static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 425{
 426	struct mr_vif_iter *iter = seq->private;
 427	struct mr_table *mrt = iter->mrt;
 428
 429	if (v == SEQ_START_TOKEN) {
 430		seq_puts(seq,
 431			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
 432	} else {
 433		const struct vif_device *vif = v;
 434		const char *name = vif->dev ? vif->dev->name : "none";
 435
 436		seq_printf(seq,
 437			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
 438			   vif - mrt->vif_table,
 439			   name, vif->bytes_in, vif->pkt_in,
 440			   vif->bytes_out, vif->pkt_out,
 441			   vif->flags);
 442	}
 443	return 0;
 444}
 445
 446static const struct seq_operations ip6mr_vif_seq_ops = {
 447	.start = ip6mr_vif_seq_start,
 448	.next  = mr_vif_seq_next,
 449	.stop  = ip6mr_vif_seq_stop,
 450	.show  = ip6mr_vif_seq_show,
 451};
 452
 453static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 454{
 455	struct net *net = seq_file_net(seq);
 456	struct mr_table *mrt;
 457
 458	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 459	if (!mrt)
 460		return ERR_PTR(-ENOENT);
 461
 462	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
 463}
 464
 465static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 466{
 467	int n;
 468
 469	if (v == SEQ_START_TOKEN) {
 470		seq_puts(seq,
 471			 "Group                            "
 472			 "Origin                           "
 473			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
 474	} else {
 475		const struct mfc6_cache *mfc = v;
 476		const struct mr_mfc_iter *it = seq->private;
 477		struct mr_table *mrt = it->mrt;
 478
 479		seq_printf(seq, "%pI6 %pI6 %-3hd",
 480			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 481			   mfc->_c.mfc_parent);
 482
 483		if (it->cache != &mrt->mfc_unres_queue) {
 484			seq_printf(seq, " %8lu %8lu %8lu",
 485				   mfc->_c.mfc_un.res.pkt,
 486				   mfc->_c.mfc_un.res.bytes,
 487				   mfc->_c.mfc_un.res.wrong_if);
 488			for (n = mfc->_c.mfc_un.res.minvif;
 489			     n < mfc->_c.mfc_un.res.maxvif; n++) {
 490				if (VIF_EXISTS(mrt, n) &&
 491				    mfc->_c.mfc_un.res.ttls[n] < 255)
 492					seq_printf(seq,
 493						   " %2d:%-3d", n,
 494						   mfc->_c.mfc_un.res.ttls[n]);
 495			}
 496		} else {
 497			/* unresolved mfc_caches don't contain
 498			 * pkt, bytes and wrong_if values
 499			 */
 500			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
 501		}
 502		seq_putc(seq, '\n');
 503	}
 504	return 0;
 505}
 506
 507static const struct seq_operations ipmr_mfc_seq_ops = {
 508	.start = ipmr_mfc_seq_start,
 509	.next  = mr_mfc_seq_next,
 510	.stop  = mr_mfc_seq_stop,
 511	.show  = ipmr_mfc_seq_show,
 512};
 513#endif
 514
 515#ifdef CONFIG_IPV6_PIMSM_V2
 516
 517static int pim6_rcv(struct sk_buff *skb)
 518{
 519	struct pimreghdr *pim;
 520	struct ipv6hdr   *encap;
 521	struct net_device  *reg_dev = NULL;
 522	struct net *net = dev_net(skb->dev);
 523	struct mr_table *mrt;
 524	struct flowi6 fl6 = {
 525		.flowi6_iif	= skb->dev->ifindex,
 526		.flowi6_mark	= skb->mark,
 527	};
 528	int reg_vif_num;
 529
 530	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 531		goto drop;
 532
 533	pim = (struct pimreghdr *)skb_transport_header(skb);
 534	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
 535	    (pim->flags & PIM_NULL_REGISTER) ||
 536	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 537			     sizeof(*pim), IPPROTO_PIM,
 538			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 539	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 540		goto drop;
 541
 542	/* check if the inner packet is destined to a multicast group */
 543	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
 544				   sizeof(*pim));
 545
 546	if (!ipv6_addr_is_multicast(&encap->daddr) ||
 547	    encap->payload_len == 0 ||
 548	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
 549		goto drop;
 550
 551	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 552		goto drop;
 553	reg_vif_num = mrt->mroute_reg_vif_num;
 554
 555	read_lock(&mrt_lock);
 556	if (reg_vif_num >= 0)
 557		reg_dev = mrt->vif_table[reg_vif_num].dev;
 558	if (reg_dev)
 559		dev_hold(reg_dev);
 560	read_unlock(&mrt_lock);
 561
 562	if (!reg_dev)
 563		goto drop;
 564
 565	skb->mac_header = skb->network_header;
 566	skb_pull(skb, (u8 *)encap - skb->data);
 567	skb_reset_network_header(skb);
 568	skb->protocol = htons(ETH_P_IPV6);
 569	skb->ip_summed = CHECKSUM_NONE;
 570
 571	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
 572
 573	netif_rx(skb);
 574
 575	dev_put(reg_dev);
 576	return 0;
 577 drop:
 578	kfree_skb(skb);
 579	return 0;
 580}
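/* Note: after validation, pim6_rcv() above decapsulates the PIM
 * REGISTER payload in place (skb_pull() to the inner IPv6 header) and
 * re-injects it through the pim6reg device via skb_tunnel_rx() and
 * netif_rx(), so the inner multicast packet traverses the normal
 * receive path a second time.
 */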
 581
 582static const struct inet6_protocol pim6_protocol = {
 583	.handler	=	pim6_rcv,
 584};
 585
 586/* Service routines creating virtual interfaces: PIMREG */
 587
 588static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 589				      struct net_device *dev)
 590{
 591	struct net *net = dev_net(dev);
 592	struct mr_table *mrt;
 593	struct flowi6 fl6 = {
 594		.flowi6_oif	= dev->ifindex,
 595		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 596		.flowi6_mark	= skb->mark,
 597	};
 598
 599	if (!pskb_inet_may_pull(skb))
 600		goto tx_err;
 601
 602	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 603		goto tx_err;
 604
 605	read_lock(&mrt_lock);
 606	dev->stats.tx_bytes += skb->len;
 607	dev->stats.tx_packets++;
 608	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
 609	read_unlock(&mrt_lock);
 610	kfree_skb(skb);
 611	return NETDEV_TX_OK;
 612
 613tx_err:
 614	dev->stats.tx_errors++;
 615	kfree_skb(skb);
 616	return NETDEV_TX_OK;
 617}
 618
 619static int reg_vif_get_iflink(const struct net_device *dev)
 620{
 621	return 0;
 622}
 623
 624static const struct net_device_ops reg_vif_netdev_ops = {
 625	.ndo_start_xmit	= reg_vif_xmit,
 626	.ndo_get_iflink = reg_vif_get_iflink,
 627};
 628
 629static void reg_vif_setup(struct net_device *dev)
 630{
 631	dev->type		= ARPHRD_PIMREG;
 632	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 633	dev->flags		= IFF_NOARP;
 634	dev->netdev_ops		= &reg_vif_netdev_ops;
 635	dev->needs_free_netdev	= true;
 636	dev->features		|= NETIF_F_NETNS_LOCAL;
 637}
 638
 639static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
 640{
 641	struct net_device *dev;
 642	char name[IFNAMSIZ];
 643
 644	if (mrt->id == RT6_TABLE_DFLT)
 645		sprintf(name, "pim6reg");
 646	else
 647		sprintf(name, "pim6reg%u", mrt->id);
 648
 649	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 650	if (!dev)
 651		return NULL;
 652
 653	dev_net_set(dev, net);
 654
 655	if (register_netdevice(dev)) {
 656		free_netdev(dev);
 657		return NULL;
 658	}
 659
 660	if (dev_open(dev, NULL))
 661		goto failure;
 662
 663	dev_hold(dev);
 664	return dev;
 665
 666failure:
 667	unregister_netdevice(dev);
 668	return NULL;
 669}
 670#endif
 671
 672static int call_ip6mr_vif_entry_notifiers(struct net *net,
 673					  enum fib_event_type event_type,
 674					  struct vif_device *vif,
 675					  mifi_t vif_index, u32 tb_id)
 676{
 677	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
 678				     vif, vif_index, tb_id,
 679				     &net->ipv6.ipmr_seq);
 680}
 681
 682static int call_ip6mr_mfc_entry_notifiers(struct net *net,
 683					  enum fib_event_type event_type,
 684					  struct mfc6_cache *mfc, u32 tb_id)
 685{
 686	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
 687				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
 688}
 689
 690/* Delete a VIF entry */
 691static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
 692		       struct list_head *head)
 693{
 694	struct vif_device *v;
 695	struct net_device *dev;
 696	struct inet6_dev *in6_dev;
 697
 698	if (vifi < 0 || vifi >= mrt->maxvif)
 699		return -EADDRNOTAVAIL;
 700
 701	v = &mrt->vif_table[vifi];
 702
 703	if (VIF_EXISTS(mrt, vifi))
 704		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
 705					       FIB_EVENT_VIF_DEL, v, vifi,
 706					       mrt->id);
 707
 708	write_lock_bh(&mrt_lock);
 709	dev = v->dev;
 710	v->dev = NULL;
 711
 712	if (!dev) {
 713		write_unlock_bh(&mrt_lock);
 714		return -EADDRNOTAVAIL;
 715	}
 716
 717#ifdef CONFIG_IPV6_PIMSM_V2
 718	if (vifi == mrt->mroute_reg_vif_num)
 719		mrt->mroute_reg_vif_num = -1;
 720#endif
 721
 722	if (vifi + 1 == mrt->maxvif) {
 723		int tmp;
 724		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 725			if (VIF_EXISTS(mrt, tmp))
 726				break;
 727		}
 728		mrt->maxvif = tmp + 1;
 729	}
 730
 731	write_unlock_bh(&mrt_lock);
 732
 733	dev_set_allmulti(dev, -1);
 734
 735	in6_dev = __in6_dev_get(dev);
 736	if (in6_dev) {
 737		in6_dev->cnf.mc_forwarding--;
 738		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
 739					     NETCONFA_MC_FORWARDING,
 740					     dev->ifindex, &in6_dev->cnf);
 741	}
 742
 743	if ((v->flags & MIFF_REGISTER) && !notify)
 744		unregister_netdevice_queue(dev, head);
 745
 746	dev_put(dev);
 747	return 0;
 748}
 749
 750static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
 751{
 752	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
 753
 754	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
 755}
 756
 757static inline void ip6mr_cache_free(struct mfc6_cache *c)
 758{
 759	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
 760}
 761
 762/* Destroy an unresolved cache entry, killing queued skbs
 763   and reporting error to netlink readers.
 764 */
 765
 766static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
 767{
 768	struct net *net = read_pnet(&mrt->net);
 769	struct sk_buff *skb;
 770
 771	atomic_dec(&mrt->cache_resolve_queue_len);
 772
 773	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
 774		if (ipv6_hdr(skb)->version == 0) {
 775			struct nlmsghdr *nlh = skb_pull(skb,
 776							sizeof(struct ipv6hdr));
 777			nlh->nlmsg_type = NLMSG_ERROR;
 778			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
 779			skb_trim(skb, nlh->nlmsg_len);
 780			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
 781			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 782		} else
 783			kfree_skb(skb);
 784	}
 785
 786	ip6mr_cache_free(c);
 787}
 788
 789
 790/* Timer process for all the unresolved queue. */
 791
 792static void ipmr_do_expire_process(struct mr_table *mrt)
 793{
 794	unsigned long now = jiffies;
 795	unsigned long expires = 10 * HZ;
 796	struct mr_mfc *c, *next;
 797
 798	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
 799		if (time_after(c->mfc_un.unres.expires, now)) {
 800			/* not yet... */
 801			unsigned long interval = c->mfc_un.unres.expires - now;
 802			if (interval < expires)
 803				expires = interval;
 804			continue;
 805		}
 806
 807		list_del(&c->list);
 808		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
 809		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
 810	}
 811
 812	if (!list_empty(&mrt->mfc_unres_queue))
 813		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 814}
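/* Note: the loop above re-arms the timer with the smallest remaining
 * expiry (at most 10 * HZ) instead of a fixed period, so the unresolved
 * queue is rescanned only when an entry can actually time out.
 */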
 815
 816static void ipmr_expire_process(struct timer_list *t)
 817{
 818	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
 819
 820	if (!spin_trylock(&mfc_unres_lock)) {
 821		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 822		return;
 823	}
 824
 825	if (!list_empty(&mrt->mfc_unres_queue))
 826		ipmr_do_expire_process(mrt);
 827
 828	spin_unlock(&mfc_unres_lock);
 829}
 830
 831/* Fill the oif list. Called under write-locked mrt_lock. */
 832
 833static void ip6mr_update_thresholds(struct mr_table *mrt,
 834				    struct mr_mfc *cache,
 835				    unsigned char *ttls)
 836{
 837	int vifi;
 838
 839	cache->mfc_un.res.minvif = MAXMIFS;
 840	cache->mfc_un.res.maxvif = 0;
 841	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 842
 843	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
 844		if (VIF_EXISTS(mrt, vifi) &&
 845		    ttls[vifi] && ttls[vifi] < 255) {
 846			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 847			if (cache->mfc_un.res.minvif > vifi)
 848				cache->mfc_un.res.minvif = vifi;
 849			if (cache->mfc_un.res.maxvif <= vifi)
 850				cache->mfc_un.res.maxvif = vifi + 1;
 851		}
 852	}
 853	cache->mfc_un.res.lastuse = jiffies;
 854}
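/* Note: the minvif/maxvif window computed above bounds the oif scan in
 * the forwarding fast path; a ttl of 255 means "never forward", so real
 * thresholds are confined to 1..254 by the loop condition.
 */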
 855
 856static int mif6_add(struct net *net, struct mr_table *mrt,
 857		    struct mif6ctl *vifc, int mrtsock)
 858{
 859	int vifi = vifc->mif6c_mifi;
 860	struct vif_device *v = &mrt->vif_table[vifi];
 861	struct net_device *dev;
 862	struct inet6_dev *in6_dev;
 863	int err;
 864
 865	/* Is the vif busy? */
 866	if (VIF_EXISTS(mrt, vifi))
 867		return -EADDRINUSE;
 868
 869	switch (vifc->mif6c_flags) {
 870#ifdef CONFIG_IPV6_PIMSM_V2
 871	case MIFF_REGISTER:
 872		/*
 873		 * Special-purpose VIF in PIM:
 874		 * all packets will be sent to the daemon.
 875		 */
 876		if (mrt->mroute_reg_vif_num >= 0)
 877			return -EADDRINUSE;
 878		dev = ip6mr_reg_vif(net, mrt);
 879		if (!dev)
 880			return -ENOBUFS;
 881		err = dev_set_allmulti(dev, 1);
 882		if (err) {
 883			unregister_netdevice(dev);
 884			dev_put(dev);
 885			return err;
 886		}
 887		break;
 888#endif
 889	case 0:
 890		dev = dev_get_by_index(net, vifc->mif6c_pifi);
 891		if (!dev)
 892			return -EADDRNOTAVAIL;
 893		err = dev_set_allmulti(dev, 1);
 894		if (err) {
 895			dev_put(dev);
 896			return err;
 897		}
 898		break;
 899	default:
 900		return -EINVAL;
 901	}
 902
 903	in6_dev = __in6_dev_get(dev);
 904	if (in6_dev) {
 905		in6_dev->cnf.mc_forwarding++;
 906		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
 907					     NETCONFA_MC_FORWARDING,
 908					     dev->ifindex, &in6_dev->cnf);
 909	}
 910
 911	/* Fill in the VIF structures */
 912	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
 913			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
 914			MIFF_REGISTER);
 915
 916	/* And finish the update by writing the critical data */
 917	write_lock_bh(&mrt_lock);
 918	v->dev = dev;
 919#ifdef CONFIG_IPV6_PIMSM_V2
 920	if (v->flags & MIFF_REGISTER)
 921		mrt->mroute_reg_vif_num = vifi;
 922#endif
 923	if (vifi + 1 > mrt->maxvif)
 924		mrt->maxvif = vifi + 1;
 925	write_unlock_bh(&mrt_lock);
 926	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
 927				       v, vifi, mrt->id);
 928	return 0;
 929}
 930
 931static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
 932					   const struct in6_addr *origin,
 933					   const struct in6_addr *mcastgrp)
 934{
 935	struct mfc6_cache_cmp_arg arg = {
 936		.mf6c_origin = *origin,
 937		.mf6c_mcastgrp = *mcastgrp,
 938	};
 939
 940	return mr_mfc_find(mrt, &arg);
 941}
 942
 943/* Look for a (*,G) entry */
 944static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
 945					       struct in6_addr *mcastgrp,
 946					       mifi_t mifi)
 947{
 948	struct mfc6_cache_cmp_arg arg = {
 949		.mf6c_origin = in6addr_any,
 950		.mf6c_mcastgrp = *mcastgrp,
 951	};
 952
 953	if (ipv6_addr_any(mcastgrp))
 954		return mr_mfc_find_any_parent(mrt, mifi);
 955	return mr_mfc_find_any(mrt, mifi, &arg);
 956}
 957
 958/* Look for a (S,G,iif) entry if parent != -1 */
 959static struct mfc6_cache *
 960ip6mr_cache_find_parent(struct mr_table *mrt,
 961			const struct in6_addr *origin,
 962			const struct in6_addr *mcastgrp,
 963			int parent)
 964{
 965	struct mfc6_cache_cmp_arg arg = {
 966		.mf6c_origin = *origin,
 967		.mf6c_mcastgrp = *mcastgrp,
 968	};
 969
 970	return mr_mfc_find_parent(mrt, &arg, parent);
 971}
 972
 973/* Allocate a multicast cache entry */
 974static struct mfc6_cache *ip6mr_cache_alloc(void)
 975{
 976	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 977	if (!c)
 978		return NULL;
 979	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 980	c->_c.mfc_un.res.minvif = MAXMIFS;
 981	c->_c.free = ip6mr_cache_free_rcu;
 982	refcount_set(&c->_c.mfc_un.res.refcount, 1);
 983	return c;
 984}
 985
 986static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
 987{
 988	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 989	if (!c)
 990		return NULL;
 991	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
 992	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
 993	return c;
 994}
 995
 996/*
 997 *	A cache entry has gone into a resolved state from queued
 998 */
 999
1000static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1001				struct mfc6_cache *uc, struct mfc6_cache *c)
1002{
1003	struct sk_buff *skb;
1004
1005	/*
1006	 *	Play the pending entries through our router
1007	 */
1008
1009	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1010		if (ipv6_hdr(skb)->version == 0) {
1011			struct nlmsghdr *nlh = skb_pull(skb,
1012							sizeof(struct ipv6hdr));
1013
1014			if (mr_fill_mroute(mrt, skb, &c->_c,
1015					   nlmsg_data(nlh)) > 0) {
1016				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1017			} else {
1018				nlh->nlmsg_type = NLMSG_ERROR;
1019				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1020				skb_trim(skb, nlh->nlmsg_len);
1021				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1022			}
1023			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1024		} else
1025			ip6_mr_forward(net, mrt, skb->dev, skb, c);
1026	}
1027}
1028
1029/*
1030 *	Bounce a cache query up to pim6sd and netlink.
1031 *
1032 *	Called under mrt_lock.
1033 */
1034
1035static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
1036			      mifi_t mifi, int assert)
1037{
1038	struct sock *mroute6_sk;
1039	struct sk_buff *skb;
1040	struct mrt6msg *msg;
1041	int ret;
1042
1043#ifdef CONFIG_IPV6_PIMSM_V2
1044	if (assert == MRT6MSG_WHOLEPKT)
1045		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1046						+sizeof(*msg));
1047	else
1048#endif
1049		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1050
1051	if (!skb)
1052		return -ENOBUFS;
1053
1054	/* I suppose that internal messages
1055	 * do not require checksums */
1056
1057	skb->ip_summed = CHECKSUM_UNNECESSARY;
1058
1059#ifdef CONFIG_IPV6_PIMSM_V2
1060	if (assert == MRT6MSG_WHOLEPKT) {
1061		/* Ugly, but we have no choice with this interface.
1062		   Duplicate old header, fix length etc.
1063		   And all this only to mangle msg->im6_msgtype and
1064		   to set msg->im6_mbz to "mbz" :-)
1065		 */
1066		skb_push(skb, -skb_network_offset(pkt));
1067
1068		skb_push(skb, sizeof(*msg));
1069		skb_reset_transport_header(skb);
1070		msg = (struct mrt6msg *)skb_transport_header(skb);
1071		msg->im6_mbz = 0;
1072		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1073		msg->im6_mif = mrt->mroute_reg_vif_num;
1074		msg->im6_pad = 0;
1075		msg->im6_src = ipv6_hdr(pkt)->saddr;
1076		msg->im6_dst = ipv6_hdr(pkt)->daddr;
1077
1078		skb->ip_summed = CHECKSUM_UNNECESSARY;
1079	} else
1080#endif
1081	{
1082	/*
1083	 *	Copy the IP header
1084	 */
1085
1086	skb_put(skb, sizeof(struct ipv6hdr));
1087	skb_reset_network_header(skb);
1088	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1089
1090	/*
1091	 *	Add our header
1092	 */
1093	skb_put(skb, sizeof(*msg));
1094	skb_reset_transport_header(skb);
1095	msg = (struct mrt6msg *)skb_transport_header(skb);
1096
1097	msg->im6_mbz = 0;
1098	msg->im6_msgtype = assert;
1099	msg->im6_mif = mifi;
1100	msg->im6_pad = 0;
1101	msg->im6_src = ipv6_hdr(pkt)->saddr;
1102	msg->im6_dst = ipv6_hdr(pkt)->daddr;
1103
1104	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1105	skb->ip_summed = CHECKSUM_UNNECESSARY;
1106	}
1107
1108	rcu_read_lock();
1109	mroute6_sk = rcu_dereference(mrt->mroute_sk);
1110	if (!mroute6_sk) {
1111		rcu_read_unlock();
1112		kfree_skb(skb);
1113		return -EINVAL;
1114	}
1115
1116	mrt6msg_netlink_event(mrt, skb);
1117
1118	/* Deliver to user space multicast routing algorithms */
1119	ret = sock_queue_rcv_skb(mroute6_sk, skb);
1120	rcu_read_unlock();
1121	if (ret < 0) {
1122		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1123		kfree_skb(skb);
1124	}
1125
1126	return ret;
1127}
1128
1129/* Queue a packet for resolution. It gets a locked cache entry! */
1130static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1131				  struct sk_buff *skb, struct net_device *dev)
1132{
1133	struct mfc6_cache *c;
1134	bool found = false;
1135	int err;
1136
1137	spin_lock_bh(&mfc_unres_lock);
1138	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1139		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1140		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1141			found = true;
1142			break;
1143		}
1144	}
1145
1146	if (!found) {
1147		/*
1148		 *	Create a new entry if allowable
1149		 */
1150
1151		c = ip6mr_cache_alloc_unres();
1152		if (!c) {
1153			spin_unlock_bh(&mfc_unres_lock);
1154
1155			kfree_skb(skb);
1156			return -ENOBUFS;
1157		}
1158
1159		/* Fill in the new cache entry */
1160		c->_c.mfc_parent = -1;
1161		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1162		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1163
1164		/*
1165		 *	Reflect first query at pim6sd
1166		 */
1167		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1168		if (err < 0) {
1169			/* If the report failed throw the cache entry
1170			   out - Brad Parker
1171			 */
1172			spin_unlock_bh(&mfc_unres_lock);
1173
1174			ip6mr_cache_free(c);
1175			kfree_skb(skb);
1176			return err;
1177		}
1178
1179		atomic_inc(&mrt->cache_resolve_queue_len);
1180		list_add(&c->_c.list, &mrt->mfc_unres_queue);
1181		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1182
1183		ipmr_do_expire_process(mrt);
1184	}
1185
1186	/* See if we can append the packet */
1187	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1188		kfree_skb(skb);
1189		err = -ENOBUFS;
1190	} else {
1191		if (dev) {
1192			skb->dev = dev;
1193			skb->skb_iif = dev->ifindex;
1194		}
1195		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1196		err = 0;
1197	}
1198
1199	spin_unlock_bh(&mfc_unres_lock);
1200	return err;
1201}
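/* Note: the qlen > 3 test above caps each unresolved (S,G) entry at
 * four queued packets, bounding the memory held while user space works
 * out a route; further packets are dropped with -ENOBUFS.
 */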
1202
1203/*
1204 *	MFC6 cache manipulation by user space
1205 */
1206
1207static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1208			    int parent)
1209{
1210	struct mfc6_cache *c;
1211
1212	/* The entries are added/deleted only under RTNL */
1213	rcu_read_lock();
1214	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1215				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1216	rcu_read_unlock();
1217	if (!c)
1218		return -ENOENT;
1219	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1220	list_del_rcu(&c->_c.list);
1221
1222	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1223				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
1224	mr6_netlink_event(mrt, c, RTM_DELROUTE);
1225	mr_cache_put(&c->_c);
1226	return 0;
1227}
1228
1229static int ip6mr_device_event(struct notifier_block *this,
1230			      unsigned long event, void *ptr)
1231{
1232	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1233	struct net *net = dev_net(dev);
1234	struct mr_table *mrt;
1235	struct vif_device *v;
1236	int ct;
1237
1238	if (event != NETDEV_UNREGISTER)
1239		return NOTIFY_DONE;
1240
1241	ip6mr_for_each_table(mrt, net) {
1242		v = &mrt->vif_table[0];
1243		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1244			if (v->dev == dev)
1245				mif6_delete(mrt, ct, 1, NULL);
1246		}
1247	}
1248
1249	return NOTIFY_DONE;
1250}
1251
1252static unsigned int ip6mr_seq_read(struct net *net)
1253{
1254	ASSERT_RTNL();
1255
1256	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1257}
1258
1259static int ip6mr_dump(struct net *net, struct notifier_block *nb)
1260{
1261	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1262		       ip6mr_mr_table_iter, &mrt_lock);
1263}
1264
1265static struct notifier_block ip6_mr_notifier = {
1266	.notifier_call = ip6mr_device_event
1267};
1268
1269static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1270	.family		= RTNL_FAMILY_IP6MR,
1271	.fib_seq_read	= ip6mr_seq_read,
1272	.fib_dump	= ip6mr_dump,
1273	.owner		= THIS_MODULE,
1274};
1275
1276static int __net_init ip6mr_notifier_init(struct net *net)
1277{
1278	struct fib_notifier_ops *ops;
1279
1280	net->ipv6.ipmr_seq = 0;
1281
1282	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1283	if (IS_ERR(ops))
1284		return PTR_ERR(ops);
1285
1286	net->ipv6.ip6mr_notifier_ops = ops;
1287
1288	return 0;
1289}
1290
1291static void __net_exit ip6mr_notifier_exit(struct net *net)
1292{
1293	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1294	net->ipv6.ip6mr_notifier_ops = NULL;
1295}
1296
1297/* Setup for IP multicast routing */
1298static int __net_init ip6mr_net_init(struct net *net)
1299{
1300	int err;
1301
1302	err = ip6mr_notifier_init(net);
1303	if (err)
1304		return err;
1305
1306	err = ip6mr_rules_init(net);
1307	if (err < 0)
1308		goto ip6mr_rules_fail;
1309
1310#ifdef CONFIG_PROC_FS
1311	err = -ENOMEM;
1312	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
1313			sizeof(struct mr_vif_iter)))
1314		goto proc_vif_fail;
1315	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
1316			sizeof(struct mr_mfc_iter)))
1317		goto proc_cache_fail;
1318#endif
1319
1320	return 0;
1321
1322#ifdef CONFIG_PROC_FS
1323proc_cache_fail:
1324	remove_proc_entry("ip6_mr_vif", net->proc_net);
1325proc_vif_fail:
1326	ip6mr_rules_exit(net);
1327#endif
1328ip6mr_rules_fail:
1329	ip6mr_notifier_exit(net);
1330	return err;
1331}
1332
1333static void __net_exit ip6mr_net_exit(struct net *net)
1334{
1335#ifdef CONFIG_PROC_FS
1336	remove_proc_entry("ip6_mr_cache", net->proc_net);
1337	remove_proc_entry("ip6_mr_vif", net->proc_net);
1338#endif
1339	ip6mr_rules_exit(net);
1340	ip6mr_notifier_exit(net);
1341}
1342
1343static struct pernet_operations ip6mr_net_ops = {
1344	.init = ip6mr_net_init,
1345	.exit = ip6mr_net_exit,
1346};
1347
1348int __init ip6_mr_init(void)
1349{
1350	int err;
1351
1352	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1353				       sizeof(struct mfc6_cache),
1354				       0, SLAB_HWCACHE_ALIGN,
1355				       NULL);
1356	if (!mrt_cachep)
1357		return -ENOMEM;
1358
1359	err = register_pernet_subsys(&ip6mr_net_ops);
1360	if (err)
1361		goto reg_pernet_fail;
1362
1363	err = register_netdevice_notifier(&ip6_mr_notifier);
1364	if (err)
1365		goto reg_notif_fail;
1366#ifdef CONFIG_IPV6_PIMSM_V2
1367	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1368		pr_err("%s: can't add PIM protocol\n", __func__);
1369		err = -EAGAIN;
1370		goto add_proto_fail;
1371	}
1372#endif
1373	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1374				   NULL, ip6mr_rtm_dumproute, 0);
1375	if (err == 0)
1376		return 0;
1377
1378#ifdef CONFIG_IPV6_PIMSM_V2
1379	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1380add_proto_fail:
1381	unregister_netdevice_notifier(&ip6_mr_notifier);
1382#endif
1383reg_notif_fail:
1384	unregister_pernet_subsys(&ip6mr_net_ops);
1385reg_pernet_fail:
1386	kmem_cache_destroy(mrt_cachep);
1387	return err;
1388}
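/* Note: the goto ladder above unwinds in strict reverse order of
 * acquisition, the usual idiom for kernel init functions with multiple
 * failure points.
 */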
1389
1390void ip6_mr_cleanup(void)
1391{
1392	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1393#ifdef CONFIG_IPV6_PIMSM_V2
1394	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1395#endif
1396	unregister_netdevice_notifier(&ip6_mr_notifier);
1397	unregister_pernet_subsys(&ip6mr_net_ops);
1398	kmem_cache_destroy(mrt_cachep);
1399}
1400
1401static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1402			 struct mf6cctl *mfc, int mrtsock, int parent)
1403{
1404	unsigned char ttls[MAXMIFS];
1405	struct mfc6_cache *uc, *c;
1406	struct mr_mfc *_uc;
1407	bool found;
1408	int i, err;
1409
1410	if (mfc->mf6cc_parent >= MAXMIFS)
1411		return -ENFILE;
1412
1413	memset(ttls, 255, MAXMIFS);
1414	for (i = 0; i < MAXMIFS; i++) {
1415		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1416			ttls[i] = 1;
1417	}
1418
1419	/* The entries are added/deleted only under RTNL */
1420	rcu_read_lock();
1421	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1422				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1423	rcu_read_unlock();
1424	if (c) {
1425		write_lock_bh(&mrt_lock);
1426		c->_c.mfc_parent = mfc->mf6cc_parent;
1427		ip6mr_update_thresholds(mrt, &c->_c, ttls);
1428		if (!mrtsock)
1429			c->_c.mfc_flags |= MFC_STATIC;
1430		write_unlock_bh(&mrt_lock);
1431		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1432					       c, mrt->id);
1433		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1434		return 0;
1435	}
1436
1437	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1438	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1439		return -EINVAL;
1440
1441	c = ip6mr_cache_alloc();
1442	if (!c)
1443		return -ENOMEM;
1444
1445	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1446	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1447	c->_c.mfc_parent = mfc->mf6cc_parent;
1448	ip6mr_update_thresholds(mrt, &c->_c, ttls);
1449	if (!mrtsock)
1450		c->_c.mfc_flags |= MFC_STATIC;
1451
1452	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1453				  ip6mr_rht_params);
1454	if (err) {
1455		pr_err("ip6mr: rhtable insert error %d\n", err);
1456		ip6mr_cache_free(c);
1457		return err;
1458	}
1459	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1460
1461	/* Check to see if we resolved a queued unresolved entry. If so,
1462	 * we need to send the queued frames on and tidy up.
1463	 */
1464	found = false;
1465	spin_lock_bh(&mfc_unres_lock);
1466	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1467		uc = (struct mfc6_cache *)_uc;
1468		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1469		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1470			list_del(&_uc->list);
1471			atomic_dec(&mrt->cache_resolve_queue_len);
1472			found = true;
1473			break;
1474		}
1475	}
1476	if (list_empty(&mrt->mfc_unres_queue))
1477		del_timer(&mrt->ipmr_expire_timer);
1478	spin_unlock_bh(&mfc_unres_lock);
1479
1480	if (found) {
1481		ip6mr_cache_resolve(net, mrt, uc, c);
1482		ip6mr_cache_free(uc);
1483	}
1484	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1485				       c, mrt->id);
1486	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1487	return 0;
1488}
1489
1490/*
1491 *	Close the multicast socket, and clear the vif tables etc
1492 */
1493
1494static void mroute_clean_tables(struct mr_table *mrt, int flags)
1495{
1496	struct mr_mfc *c, *tmp;
1497	LIST_HEAD(list);
1498	int i;
1499
1500	/* Shut down all active vif entries */
1501	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
1502		for (i = 0; i < mrt->maxvif; i++) {
1503			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1504			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
1505			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
1506				continue;
1507			mif6_delete(mrt, i, 0, &list);
1508		}
1509		unregister_netdevice_many(&list);
1510	}
1511
1512	/* Wipe the cache */
1513	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
1514		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1515			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
1516			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
1517				continue;
1518			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1519			list_del_rcu(&c->list);
1520			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1521						       FIB_EVENT_ENTRY_DEL,
1522						       (struct mfc6_cache *)c, mrt->id);
1523			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1524			mr_cache_put(c);
1525		}
1526	}
1527
1528	if (flags & MRT6_FLUSH_MFC) {
1529		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1530			spin_lock_bh(&mfc_unres_lock);
1531			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1532				list_del(&c->list);
1533				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1534						  RTM_DELROUTE);
1535				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1536			}
1537			spin_unlock_bh(&mfc_unres_lock);
1538		}
1539	}
1540}
1541
1542static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1543{
1544	int err = 0;
1545	struct net *net = sock_net(sk);
1546
1547	rtnl_lock();
1548	write_lock_bh(&mrt_lock);
1549	if (rtnl_dereference(mrt->mroute_sk)) {
1550		err = -EADDRINUSE;
1551	} else {
1552		rcu_assign_pointer(mrt->mroute_sk, sk);
1553		sock_set_flag(sk, SOCK_RCU_FREE);
1554		net->ipv6.devconf_all->mc_forwarding++;
1555	}
1556	write_unlock_bh(&mrt_lock);
1557
1558	if (!err)
1559		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1560					     NETCONFA_MC_FORWARDING,
1561					     NETCONFA_IFINDEX_ALL,
1562					     net->ipv6.devconf_all);
1563	rtnl_unlock();
1564
1565	return err;
1566}
1567
1568int ip6mr_sk_done(struct sock *sk)
1569{
1570	int err = -EACCES;
1571	struct net *net = sock_net(sk);
1572	struct mr_table *mrt;
1573
1574	if (sk->sk_type != SOCK_RAW ||
1575	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1576		return err;
1577
1578	rtnl_lock();
1579	ip6mr_for_each_table(mrt, net) {
1580		if (sk == rtnl_dereference(mrt->mroute_sk)) {
1581			write_lock_bh(&mrt_lock);
1582			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1583			/* Note that mroute_sk had SOCK_RCU_FREE set,
1584			 * so the RCU grace period before sk freeing
1585			 * is guaranteed by sk_destruct()
1586			 */
1587			net->ipv6.devconf_all->mc_forwarding--;
1588			write_unlock_bh(&mrt_lock);
1589			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1590						     NETCONFA_MC_FORWARDING,
1591						     NETCONFA_IFINDEX_ALL,
1592						     net->ipv6.devconf_all);
1593
1594			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
1595			err = 0;
1596			break;
1597		}
1598	}
1599	rtnl_unlock();
1600
1601	return err;
1602}
1603
1604bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1605{
1606	struct mr_table *mrt;
1607	struct flowi6 fl6 = {
1608		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
1609		.flowi6_oif	= skb->dev->ifindex,
1610		.flowi6_mark	= skb->mark,
1611	};
1612
1613	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1614		return false;
1615
1616	return rcu_access_pointer(mrt->mroute_sk);
1617}
1618EXPORT_SYMBOL(mroute6_is_socket);
1619
1620/*
1621 *	Socket options and virtual interface manipulation. The whole
1622 *	virtual interface system is a complete heap, but unfortunately
1623 *	that's how BSD mrouted happens to think. Maybe one day with a proper
1624 *	MOSPF/PIM router set up we can clean this up.
1625 */
1626
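/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted): a routing daemon typically becomes the mroute socket with
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 * and then issues MRT6_ADD_MIF / MRT6_ADD_MFC calls on the same socket.
 */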
1627int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1628{
1629	int ret, parent = 0;
1630	struct mif6ctl vif;
1631	struct mf6cctl mfc;
1632	mifi_t mifi;
1633	struct net *net = sock_net(sk);
1634	struct mr_table *mrt;
1635
1636	if (sk->sk_type != SOCK_RAW ||
1637	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1638		return -EOPNOTSUPP;
1639
1640	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1641	if (!mrt)
1642		return -ENOENT;
1643
1644	if (optname != MRT6_INIT) {
1645		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1646		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
1647			return -EACCES;
1648	}
1649
1650	switch (optname) {
1651	case MRT6_INIT:
1652		if (optlen < sizeof(int))
1653			return -EINVAL;
1654
1655		return ip6mr_sk_init(mrt, sk);
1656
1657	case MRT6_DONE:
1658		return ip6mr_sk_done(sk);
1659
1660	case MRT6_ADD_MIF:
1661		if (optlen < sizeof(vif))
1662			return -EINVAL;
1663		if (copy_from_user(&vif, optval, sizeof(vif)))
1664			return -EFAULT;
1665		if (vif.mif6c_mifi >= MAXMIFS)
1666			return -ENFILE;
1667		rtnl_lock();
1668		ret = mif6_add(net, mrt, &vif,
1669			       sk == rtnl_dereference(mrt->mroute_sk));
1670		rtnl_unlock();
1671		return ret;
1672
1673	case MRT6_DEL_MIF:
1674		if (optlen < sizeof(mifi_t))
1675			return -EINVAL;
1676		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1677			return -EFAULT;
1678		rtnl_lock();
1679		ret = mif6_delete(mrt, mifi, 0, NULL);
1680		rtnl_unlock();
1681		return ret;
1682
1683	/*
1684	 *	Manipulate the forwarding caches. These live
1685	 *	in a sort of kernel/user symbiosis.
1686	 */
1687	case MRT6_ADD_MFC:
1688	case MRT6_DEL_MFC:
1689		parent = -1;
1690		/* fall through */
1691	case MRT6_ADD_MFC_PROXY:
1692	case MRT6_DEL_MFC_PROXY:
1693		if (optlen < sizeof(mfc))
1694			return -EINVAL;
1695		if (copy_from_user(&mfc, optval, sizeof(mfc)))
1696			return -EFAULT;
1697		if (parent == 0)
1698			parent = mfc.mf6cc_parent;
1699		rtnl_lock();
1700		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1701			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1702		else
1703			ret = ip6mr_mfc_add(net, mrt, &mfc,
1704					    sk ==
1705					    rtnl_dereference(mrt->mroute_sk),
1706					    parent);
1707		rtnl_unlock();
1708		return ret;
1709
1710	case MRT6_FLUSH:
1711	{
1712		int flags;
1713
1714		if (optlen != sizeof(flags))
1715			return -EINVAL;
1716		if (get_user(flags, (int __user *)optval))
1717			return -EFAULT;
1718		rtnl_lock();
1719		mroute_clean_tables(mrt, flags);
1720		rtnl_unlock();
1721		return 0;
1722	}
1723
1724	/*
1725	 *	Control PIM assert (activating PIM also activates assert)
1726	 */
1727	case MRT6_ASSERT:
1728	{
1729		int v;
1730
1731		if (optlen != sizeof(v))
1732			return -EINVAL;
1733		if (get_user(v, (int __user *)optval))
1734			return -EFAULT;
1735		mrt->mroute_do_assert = v;
1736		return 0;
1737	}
1738
1739#ifdef CONFIG_IPV6_PIMSM_V2
1740	case MRT6_PIM:
1741	{
1742		int v;
1743
1744		if (optlen != sizeof(v))
1745			return -EINVAL;
1746		if (get_user(v, (int __user *)optval))
1747			return -EFAULT;
1748		v = !!v;
1749		rtnl_lock();
1750		ret = 0;
1751		if (v != mrt->mroute_do_pim) {
1752			mrt->mroute_do_pim = v;
1753			mrt->mroute_do_assert = v;
1754		}
1755		rtnl_unlock();
1756		return ret;
1757	}
1758
1759#endif
1760#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1761	case MRT6_TABLE:
1762	{
1763		u32 v;
1764
1765		if (optlen != sizeof(u32))
1766			return -EINVAL;
1767		if (get_user(v, (u32 __user *)optval))
1768			return -EFAULT;
1769		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1770		if (v != RT_TABLE_DEFAULT && v >= 100000000)
1771			return -EINVAL;
1772		if (sk == rcu_access_pointer(mrt->mroute_sk))
1773			return -EBUSY;
1774
1775		rtnl_lock();
1776		ret = 0;
1777		mrt = ip6mr_new_table(net, v);
1778		if (IS_ERR(mrt))
1779			ret = PTR_ERR(mrt);
1780		else
1781			raw6_sk(sk)->ip6mr_table = v;
1782		rtnl_unlock();
1783		return ret;
1784	}
1785#endif
1786	/*
1787	 *	Spurious command, or MRT6_VERSION which you cannot
1788	 *	set.
1789	 */
1790	default:
1791		return -ENOPROTOOPT;
1792	}
1793}
1794
1795/*
1796 *	Getsock opt support for the multicast routing system.
1797 */
1798
1799int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1800			  int __user *optlen)
1801{
1802	int olr;
1803	int val;
1804	struct net *net = sock_net(sk);
1805	struct mr_table *mrt;
1806
1807	if (sk->sk_type != SOCK_RAW ||
1808	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1809		return -EOPNOTSUPP;
1810
1811	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1812	if (!mrt)
1813		return -ENOENT;
1814
1815	switch (optname) {
1816	case MRT6_VERSION:
1817		val = 0x0305;
1818		break;
1819#ifdef CONFIG_IPV6_PIMSM_V2
1820	case MRT6_PIM:
1821		val = mrt->mroute_do_pim;
1822		break;
1823#endif
1824	case MRT6_ASSERT:
1825		val = mrt->mroute_do_assert;
1826		break;
1827	default:
1828		return -ENOPROTOOPT;
1829	}
1830
1831	if (get_user(olr, optlen))
1832		return -EFAULT;
1833
1834	olr = min_t(int, olr, sizeof(int));
1835	if (olr < 0)
1836		return -EINVAL;
1837
1838	if (put_user(olr, optlen))
1839		return -EFAULT;
1840	if (copy_to_user(optval, &val, olr))
1841		return -EFAULT;
1842	return 0;
1843}
1844
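The getsockopt mirror is mostly used for the MRT6_VERSION probe (0x0305 above). A sketch of the caller side:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

/* Query the multicast routing API version; expect 0x0305. */
static int mroute_version(int sock)
{
	int ver = 0;
	socklen_t len = sizeof(ver);

	if (getsockopt(sock, IPPROTO_IPV6, MRT6_VERSION, &ver, &len) < 0)
		return -1;
	return ver;
}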
1845/*
1846 *	The IP multicast ioctl support routines.
1847 */
1848
1849int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1850{
1851	struct sioc_sg_req6 sr;
1852	struct sioc_mif_req6 vr;
1853	struct vif_device *vif;
1854	struct mfc6_cache *c;
1855	struct net *net = sock_net(sk);
1856	struct mr_table *mrt;
1857
1858	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1859	if (!mrt)
1860		return -ENOENT;
1861
1862	switch (cmd) {
1863	case SIOCGETMIFCNT_IN6:
1864		if (copy_from_user(&vr, arg, sizeof(vr)))
1865			return -EFAULT;
1866		if (vr.mifi >= mrt->maxvif)
1867			return -EINVAL;
1868		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1869		read_lock(&mrt_lock);
1870		vif = &mrt->vif_table[vr.mifi];
1871		if (VIF_EXISTS(mrt, vr.mifi)) {
1872			vr.icount = vif->pkt_in;
1873			vr.ocount = vif->pkt_out;
1874			vr.ibytes = vif->bytes_in;
1875			vr.obytes = vif->bytes_out;
1876			read_unlock(&mrt_lock);
1877
1878			if (copy_to_user(arg, &vr, sizeof(vr)))
1879				return -EFAULT;
1880			return 0;
1881		}
1882		read_unlock(&mrt_lock);
1883		return -EADDRNOTAVAIL;
1884	case SIOCGETSGCNT_IN6:
1885		if (copy_from_user(&sr, arg, sizeof(sr)))
1886			return -EFAULT;
1887
1888		rcu_read_lock();
1889		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1890		if (c) {
1891			sr.pktcnt = c->_c.mfc_un.res.pkt;
1892			sr.bytecnt = c->_c.mfc_un.res.bytes;
1893			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1894			rcu_read_unlock();
1895
1896			if (copy_to_user(arg, &sr, sizeof(sr)))
1897				return -EFAULT;
1898			return 0;
1899		}
1900		rcu_read_unlock();
1901		return -EADDRNOTAVAIL;
1902	default:
1903		return -ENOIOCTLCMD;
1904	}
1905}
1906
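These ioctls are how a daemon samples the per-mif and per-flow counters without netlink. A sketch polling the (S,G) counters read out above; the addresses and helper name are placeholders:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

/* Read packet/byte/wrong-if counters for one (S,G) flow. */
static int get_sg_counters(int sock, const char *src, const char *grp,
			   struct sioc_sg_req6 *sr)
{
	memset(sr, 0, sizeof(*sr));
	sr->src.sin6_family = AF_INET6;
	inet_pton(AF_INET6, src, &sr->src.sin6_addr);
	sr->grp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, grp, &sr->grp.sin6_addr);

	return ioctl(sock, SIOCGETSGCNT_IN6, sr);
}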
1907#ifdef CONFIG_COMPAT
1908struct compat_sioc_sg_req6 {
1909	struct sockaddr_in6 src;
1910	struct sockaddr_in6 grp;
1911	compat_ulong_t pktcnt;
1912	compat_ulong_t bytecnt;
1913	compat_ulong_t wrong_if;
1914};
1915
1916struct compat_sioc_mif_req6 {
1917	mifi_t	mifi;
1918	compat_ulong_t icount;
1919	compat_ulong_t ocount;
1920	compat_ulong_t ibytes;
1921	compat_ulong_t obytes;
1922};
1923
1924int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1925{
1926	struct compat_sioc_sg_req6 sr;
1927	struct compat_sioc_mif_req6 vr;
1928	struct vif_device *vif;
1929	struct mfc6_cache *c;
1930	struct net *net = sock_net(sk);
1931	struct mr_table *mrt;
1932
1933	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1934	if (!mrt)
1935		return -ENOENT;
1936
1937	switch (cmd) {
1938	case SIOCGETMIFCNT_IN6:
1939		if (copy_from_user(&vr, arg, sizeof(vr)))
1940			return -EFAULT;
1941		if (vr.mifi >= mrt->maxvif)
1942			return -EINVAL;
1943		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1944		read_lock(&mrt_lock);
1945		vif = &mrt->vif_table[vr.mifi];
1946		if (VIF_EXISTS(mrt, vr.mifi)) {
1947			vr.icount = vif->pkt_in;
1948			vr.ocount = vif->pkt_out;
1949			vr.ibytes = vif->bytes_in;
1950			vr.obytes = vif->bytes_out;
1951			read_unlock(&mrt_lock);
1952
1953			if (copy_to_user(arg, &vr, sizeof(vr)))
1954				return -EFAULT;
1955			return 0;
1956		}
1957		read_unlock(&mrt_lock);
1958		return -EADDRNOTAVAIL;
1959	case SIOCGETSGCNT_IN6:
1960		if (copy_from_user(&sr, arg, sizeof(sr)))
1961			return -EFAULT;
1962
1963		rcu_read_lock();
1964		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1965		if (c) {
1966			sr.pktcnt = c->_c.mfc_un.res.pkt;
1967			sr.bytecnt = c->_c.mfc_un.res.bytes;
1968			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1969			rcu_read_unlock();
1970
1971			if (copy_to_user(arg, &sr, sizeof(sr)))
1972				return -EFAULT;
1973			return 0;
1974		}
1975		rcu_read_unlock();
1976		return -EADDRNOTAVAIL;
1977	default:
1978		return -ENOIOCTLCMD;
1979	}
1980}
1981#endif
1982
1983static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1984{
1985	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1986		      IPSTATS_MIB_OUTFORWDATAGRAMS);
1987	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1988		      IPSTATS_MIB_OUTOCTETS, skb->len);
1989	return dst_output(net, sk, skb);
1990}
1991
1992/*
1993 *	Processing handlers for ip6mr_forward
1994 */
1995
1996static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
1997			  struct sk_buff *skb, int vifi)
1998{
1999	struct ipv6hdr *ipv6h;
2000	struct vif_device *vif = &mrt->vif_table[vifi];
2001	struct net_device *dev;
2002	struct dst_entry *dst;
2003	struct flowi6 fl6;
2004
2005	if (!vif->dev)
2006		goto out_free;
2007
2008#ifdef CONFIG_IPV6_PIMSM_V2
2009	if (vif->flags & MIFF_REGISTER) {
2010		vif->pkt_out++;
2011		vif->bytes_out += skb->len;
2012		vif->dev->stats.tx_bytes += skb->len;
2013		vif->dev->stats.tx_packets++;
2014		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2015		goto out_free;
2016	}
2017#endif
2018
2019	ipv6h = ipv6_hdr(skb);
2020
2021	fl6 = (struct flowi6) {
2022		.flowi6_oif = vif->link,
2023		.daddr = ipv6h->daddr,
2024	};
2025
2026	dst = ip6_route_output(net, NULL, &fl6);
2027	if (dst->error) {
2028		dst_release(dst);
2029		goto out_free;
2030	}
2031
2032	skb_dst_drop(skb);
2033	skb_dst_set(skb, dst);
2034
2035	/*
2036	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
2037	 * locally not only before forwarding, but also after forwarding on
2038	 * all output interfaces. Clearly, if an mrouter runs a multicast
2039	 * application, that application should receive packets regardless
2040	 * of the interface on which it joined.
2041	 * If we did not do this, the application would have to join on all
2042	 * interfaces. On the other hand, a multihomed host (or router, but
2043	 * not mrouter) cannot join on more than one interface - it would
2044	 * result in receiving multiple copies of each packet.
2045	 */
2046	dev = vif->dev;
2047	skb->dev = dev;
2048	vif->pkt_out++;
2049	vif->bytes_out += skb->len;
2050
2051	/* We are about to write */
2052	/* XXX: extension headers? */
2053	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2054		goto out_free;
2055
2056	ipv6h = ipv6_hdr(skb);
2057	ipv6h->hop_limit--;
2058
2059	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2060
2061	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2062		       net, NULL, skb, skb->dev, dev,
2063		       ip6mr_forward2_finish);
2064
2065out_free:
2066	kfree_skb(skb);
2067	return 0;
2068}
2069
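The MIFF_REGISTER branch above never transmits; it loops the packet back to user space as an MRT6MSG_WHOLEPKT report so the daemon can build a PIM Register. The register mif is created like any other, just flagged; a sketch (CONFIG_IPV6_PIMSM_V2, helper name is a placeholder):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

/* Add mif #reg_mifi as the PIM register interface; the kernel
 * backs it with a pim6reg%u device.
 */
static int add_register_mif(int sock, mifi_t reg_mifi)
{
	struct mif6ctl mc;

	memset(&mc, 0, sizeof(mc));
	mc.mif6c_mifi = reg_mifi;
	mc.mif6c_flags = MIFF_REGISTER;

	return setsockopt(sock, IPPROTO_IPV6, MRT6_ADD_MIF,
			  &mc, sizeof(mc));
}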
2070static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2071{
2072	int ct;
2073
2074	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2075		if (mrt->vif_table[ct].dev == dev)
2076			break;
2077	}
2078	return ct;
2079}
2080
2081static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2082			   struct net_device *dev, struct sk_buff *skb,
2083			   struct mfc6_cache *c)
2084{
2085	int psend = -1;
2086	int vif, ct;
2087	int true_vifi = ip6mr_find_vif(mrt, dev);
2088
2089	vif = c->_c.mfc_parent;
2090	c->_c.mfc_un.res.pkt++;
2091	c->_c.mfc_un.res.bytes += skb->len;
2092	c->_c.mfc_un.res.lastuse = jiffies;
2093
2094	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2095		struct mfc6_cache *cache_proxy;
2096
2097		/* For an (*,G) entry, we only check that the incoming
2098		 * interface is part of the static tree.
2099		 */
2100		rcu_read_lock();
2101		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2102		if (cache_proxy &&
2103		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
2104			rcu_read_unlock();
2105			goto forward;
2106		}
2107		rcu_read_unlock();
2108	}
2109
2110	/*
2111	 * Wrong interface: drop packet and (maybe) send PIM assert.
2112	 */
2113	if (mrt->vif_table[vif].dev != dev) {
2114		c->_c.mfc_un.res.wrong_if++;
2115
2116		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2117		    /* PIM-SM uses asserts when switching from RPT to SPT,
2118		       so we cannot check that the packet arrived on an oif.
2119		       That is bad, but otherwise we would have to move a
2120		       pretty large chunk of pimd into the kernel. Ough... --ANK
2121		     */
2122		    (mrt->mroute_do_pim ||
2123		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2124		    time_after(jiffies,
2125			       c->_c.mfc_un.res.last_assert +
2126			       MFC_ASSERT_THRESH)) {
2127			c->_c.mfc_un.res.last_assert = jiffies;
2128			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2129		}
2130		goto dont_forward;
2131	}
2132
2133forward:
2134	mrt->vif_table[vif].pkt_in++;
2135	mrt->vif_table[vif].bytes_in += skb->len;
2136
2137	/*
2138	 *	Forward the frame
2139	 */
2140	if (ipv6_addr_any(&c->mf6c_origin) &&
2141	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
2142		if (true_vifi >= 0 &&
2143		    true_vifi != c->_c.mfc_parent &&
2144		    ipv6_hdr(skb)->hop_limit >
2145				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2146			/* It's an (*,*) entry and the packet is not coming from
2147			 * the upstream: forward the packet to the upstream
2148			 * only.
2149			 */
2150			psend = c->_c.mfc_parent;
2151			goto last_forward;
2152		}
2153		goto dont_forward;
2154	}
2155	for (ct = c->_c.mfc_un.res.maxvif - 1;
2156	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2157		/* For (*,G) entry, don't forward to the incoming interface */
2158		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2159		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2160			if (psend != -1) {
2161				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2162				if (skb2)
2163					ip6mr_forward2(net, mrt, skb2, psend);
2164			}
2165			psend = ct;
2166		}
2167	}
2168last_forward:
2169	if (psend != -1) {
2170		ip6mr_forward2(net, mrt, skb, psend);
2171		return;
2172	}
2173
2174dont_forward:
2175	kfree_skb(skb);
2176}
2177
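The ipv6_addr_any() branches above implement the (*,G) and (*,*) wildcard entries, which user space installs through the *_PROXY variants of the MFC calls. A sketch for a (*,G) entry; the group address and mif indices are placeholders:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

/* Install a (*,G) proxy entry: any-source traffic to grp arriving
 * on parent_mif fans out to mif 1. Leaving mf6cc_origin as the
 * unspecified address (::) is what makes it a wildcard.
 */
static int add_star_g(int sock, const char *grp, mifi_t parent_mif)
{
	struct mf6cctl mc;

	memset(&mc, 0, sizeof(mc));
	mc.mf6cc_origin.sin6_family = AF_INET6;	/* origin stays :: */
	mc.mf6cc_mcastgrp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, grp, &mc.mf6cc_mcastgrp.sin6_addr);
	mc.mf6cc_parent = parent_mif;
	IF_SET(1, &mc.mf6cc_ifset);

	return setsockopt(sock, IPPROTO_IPV6, MRT6_ADD_MFC_PROXY,
			  &mc, sizeof(mc));
}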
2178
2179/*
2180 *	Multicast packets for forwarding arrive here
2181 */
2182
2183int ip6_mr_input(struct sk_buff *skb)
2184{
2185	struct mfc6_cache *cache;
2186	struct net *net = dev_net(skb->dev);
2187	struct mr_table *mrt;
2188	struct flowi6 fl6 = {
2189		.flowi6_iif	= skb->dev->ifindex,
2190		.flowi6_mark	= skb->mark,
2191	};
2192	int err;
2193	struct net_device *dev;
2194
2195	/* skb->dev passed in is the master dev for VRFs.
2196	 * Get the real interface that has a vif associated with it.
2197	 */
2198	dev = skb->dev;
2199	if (netif_is_l3_master(skb->dev)) {
2200		dev = dev_get_by_index_rcu(net, IP6CB(skb)->iif);
2201		if (!dev) {
2202			kfree_skb(skb);
2203			return -ENODEV;
2204		}
2205	}
2206
2207	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2208	if (err < 0) {
2209		kfree_skb(skb);
2210		return err;
2211	}
2212
2213	read_lock(&mrt_lock);
2214	cache = ip6mr_cache_find(mrt,
2215				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2216	if (!cache) {
2217		int vif = ip6mr_find_vif(mrt, dev);
2218
2219		if (vif >= 0)
2220			cache = ip6mr_cache_find_any(mrt,
2221						     &ipv6_hdr(skb)->daddr,
2222						     vif);
2223	}
2224
2225	/*
2226	 *	No usable cache entry
2227	 */
2228	if (!cache) {
2229		int vif;
2230
2231		vif = ip6mr_find_vif(mrt, dev);
2232		if (vif >= 0) {
2233			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
2234			read_unlock(&mrt_lock);
2235
2236			return err;
2237		}
2238		read_unlock(&mrt_lock);
2239		kfree_skb(skb);
2240		return -ENODEV;
2241	}
2242
2243	ip6_mr_forward(net, mrt, dev, skb, cache);
2244
2245	read_unlock(&mrt_lock);
2246
2247	return 0;
2248}
2249
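When no cache entry exists, ip6mr_cache_unresolved() queues the packet and raises an MRT6MSG_NOCACHE report; together with the WRONGMIF and WHOLEPKT reports seen earlier, these are delivered on the mroute socket itself, beginning with a struct mrt6msg whose im6_mbz field is zero. A sketch of the daemon-side read loop (buffer size and helper name are placeholders):

#include <unistd.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

/* Drain one kernel report from the mroute socket. im6_mbz == 0
 * distinguishes reports from ordinary ICMPv6 traffic.
 */
static void handle_upcall(int sock)
{
	char buf[2048];
	struct mrt6msg *msg = (struct mrt6msg *)buf;
	ssize_t n = read(sock, buf, sizeof(buf));

	if (n < (ssize_t)sizeof(*msg) || msg->im6_mbz != 0)
		return;

	switch (msg->im6_msgtype) {
	case MRT6MSG_NOCACHE:	/* resolve, then MRT6_ADD_MFC */
	case MRT6MSG_WRONGMIF:	/* candidate for a PIM assert */
	case MRT6MSG_WHOLEPKT:	/* encapsulate in a PIM Register */
		break;
	}
}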
2250int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2251		    u32 portid)
2252{
2253	int err;
2254	struct mr_table *mrt;
2255	struct mfc6_cache *cache;
2256	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2257
2258	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2259	if (!mrt)
2260		return -ENOENT;
2261
2262	read_lock(&mrt_lock);
2263	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2264	if (!cache && skb->dev) {
2265		int vif = ip6mr_find_vif(mrt, skb->dev);
2266
2267		if (vif >= 0)
2268			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2269						     vif);
2270	}
2271
2272	if (!cache) {
2273		struct sk_buff *skb2;
2274		struct ipv6hdr *iph;
2275		struct net_device *dev;
2276		int vif;
2277
2278		dev = skb->dev;
2279		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2280			read_unlock(&mrt_lock);
2281			return -ENODEV;
2282		}
2283
2284		/* really correct? */
2285		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2286		if (!skb2) {
2287			read_unlock(&mrt_lock);
2288			return -ENOMEM;
2289		}
2290
2291		NETLINK_CB(skb2).portid = portid;
2292		skb_reset_transport_header(skb2);
2293
2294		skb_put(skb2, sizeof(struct ipv6hdr));
2295		skb_reset_network_header(skb2);
2296
2297		iph = ipv6_hdr(skb2);
2298		iph->version = 0;
2299		iph->priority = 0;
2300		iph->flow_lbl[0] = 0;
2301		iph->flow_lbl[1] = 0;
2302		iph->flow_lbl[2] = 0;
2303		iph->payload_len = 0;
2304		iph->nexthdr = IPPROTO_NONE;
2305		iph->hop_limit = 0;
2306		iph->saddr = rt->rt6i_src.addr;
2307		iph->daddr = rt->rt6i_dst.addr;
2308
2309		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
2310		read_unlock(&mrt_lock);
2311
2312		return err;
2313	}
2314
2315	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2316	read_unlock(&mrt_lock);
2317	return err;
2318}
2319
2320static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2321			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2322			     int flags)
2323{
2324	struct nlmsghdr *nlh;
2325	struct rtmsg *rtm;
2326	int err;
2327
2328	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2329	if (!nlh)
2330		return -EMSGSIZE;
2331
2332	rtm = nlmsg_data(nlh);
2333	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2334	rtm->rtm_dst_len  = 128;
2335	rtm->rtm_src_len  = 128;
2336	rtm->rtm_tos      = 0;
2337	rtm->rtm_table    = mrt->id;
2338	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2339		goto nla_put_failure;
2340	rtm->rtm_type = RTN_MULTICAST;
2341	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2342	if (c->_c.mfc_flags & MFC_STATIC)
2343		rtm->rtm_protocol = RTPROT_STATIC;
2344	else
2345		rtm->rtm_protocol = RTPROT_MROUTED;
2346	rtm->rtm_flags    = 0;
2347
2348	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2349	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2350		goto nla_put_failure;
2351	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2352	/* do not break the dump if cache is unresolved */
2353	if (err < 0 && err != -ENOENT)
2354		goto nla_put_failure;
2355
2356	nlmsg_end(skb, nlh);
2357	return 0;
2358
2359nla_put_failure:
2360	nlmsg_cancel(skb, nlh);
2361	return -EMSGSIZE;
2362}
2363
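mr6_netlink_event() below broadcasts these fill results to the RTNLGRP_IPV6_MROUTE group whenever a cache entry is added or deleted, so a monitor can watch MFC churn without owning the mroute socket. A sketch of the subscriber side; the helper name is a placeholder:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Watch MFC add/delete events. Entries arrive as RTM_NEWROUTE /
 * RTM_DELROUTE messages whose rtmsg has rtm_family set to
 * RTNL_FAMILY_IP6MR, exactly as filled in above.
 */
static int open_mfc_monitor(void)
{
	struct sockaddr_nl snl;
	unsigned int grp = RTNLGRP_IPV6_MROUTE;
	int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (sock < 0)
		return -1;
	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	if (bind(sock, (struct sockaddr *)&snl, sizeof(snl)) < 0 ||
	    setsockopt(sock, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0) {
		close(sock);
		return -1;
	}
	return sock;
}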
2364static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2365			      u32 portid, u32 seq, struct mr_mfc *c,
2366			      int cmd, int flags)
2367{
2368	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2369				 cmd, flags);
2370}
2371
2372static int mr6_msgsize(bool unresolved, int maxvif)
2373{
2374	size_t len =
2375		NLMSG_ALIGN(sizeof(struct rtmsg))
2376		+ nla_total_size(4)	/* RTA_TABLE */
2377		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2378		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2379		;
2380
2381	if (!unresolved)
2382		len = len
2383		      + nla_total_size(4)	/* RTA_IIF */
2384		      + nla_total_size(0)	/* RTA_MULTIPATH */
2385		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2386						/* RTA_MFC_STATS */
2387		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2388		;
2389
2390	return len;
2391}
2392
2393static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2394			      int cmd)
2395{
2396	struct net *net = read_pnet(&mrt->net);
2397	struct sk_buff *skb;
2398	int err = -ENOBUFS;
2399
2400	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2401			GFP_ATOMIC);
2402	if (!skb)
2403		goto errout;
2404
2405	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2406	if (err < 0)
2407		goto errout;
2408
2409	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2410	return;
2411
2412errout:
2413	kfree_skb(skb);
2414	if (err < 0)
2415		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2416}
2417
2418static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2419{
2420	size_t len =
2421		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2422		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2423		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2424					/* IP6MRA_CREPORT_SRC_ADDR */
2425		+ nla_total_size(sizeof(struct in6_addr))
2426					/* IP6MRA_CREPORT_DST_ADDR */
2427		+ nla_total_size(sizeof(struct in6_addr))
2428					/* IP6MRA_CREPORT_PKT */
2429		+ nla_total_size(payloadlen)
2430		;
2431
2432	return len;
2433}
2434
2435static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2436{
2437	struct net *net = read_pnet(&mrt->net);
2438	struct nlmsghdr *nlh;
2439	struct rtgenmsg *rtgenm;
2440	struct mrt6msg *msg;
2441	struct sk_buff *skb;
2442	struct nlattr *nla;
2443	int payloadlen;
2444
2445	payloadlen = pkt->len - sizeof(struct mrt6msg);
2446	msg = (struct mrt6msg *)skb_transport_header(pkt);
2447
2448	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2449	if (!skb)
2450		goto errout;
2451
2452	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2453			sizeof(struct rtgenmsg), 0);
2454	if (!nlh)
2455		goto errout;
2456	rtgenm = nlmsg_data(nlh);
2457	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2458	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2459	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2460	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2461			     &msg->im6_src) ||
2462	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2463			     &msg->im6_dst))
2464		goto nla_put_failure;
2465
2466	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2467	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2468				  nla_data(nla), payloadlen))
2469		goto nla_put_failure;
2470
2471	nlmsg_end(skb, nlh);
2472
2473	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2474	return;
2475
2476nla_put_failure:
2477	nlmsg_cancel(skb, nlh);
2478errout:
2479	kfree_skb(skb);
2480	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2481}
2482
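This mirrors each mrt6msg upcall to the RTNLGRP_IPV6_MROUTE_R group as an RTM_NEWCACHEREPORT message, so diagnostic tools can observe cache reports without being the routing daemon. A sketch of pulling one attribute back out of such a message, using raw uapi macros; a real tool would use libmnl/libnl for the attribute walk, and the helper name is a placeholder:

#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/mroute6.h>

/* Extract the mif id from one RTM_NEWCACHEREPORT message whose
 * header has already been validated with NLMSG_OK().
 */
static int creport_mif(const struct nlmsghdr *nlh, __u32 *mif)
{
	const char *p = (const char *)NLMSG_DATA(nlh) +
			NLMSG_ALIGN(sizeof(struct rtgenmsg));
	int len = nlh->nlmsg_len - NLMSG_HDRLEN -
		  NLMSG_ALIGN(sizeof(struct rtgenmsg));

	while (len >= NLA_HDRLEN) {
		const struct nlattr *nla = (const struct nlattr *)p;

		if (nla->nla_len < NLA_HDRLEN || (int)nla->nla_len > len)
			break;
		if ((nla->nla_type & NLA_TYPE_MASK) == IP6MRA_CREPORT_MIF_ID) {
			memcpy(mif, p + NLA_HDRLEN, sizeof(*mif));
			return 0;
		}
		len -= NLA_ALIGN(nla->nla_len);
		p += NLA_ALIGN(nla->nla_len);
	}
	return -1;
}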
2483static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2484{
2485	const struct nlmsghdr *nlh = cb->nlh;
2486	struct fib_dump_filter filter = {};
2487	int err;
2488
2489	if (cb->strict_check) {
2490		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
2491					    &filter, cb);
2492		if (err < 0)
2493			return err;
2494	}
2495
2496	if (filter.table_id) {
2497		struct mr_table *mrt;
2498
2499		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2500		if (!mrt) {
2501			if (filter.dump_all_families)
2502				return skb->len;
2503
2504			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2505			return -ENOENT;
2506		}
2507		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2508				    &mfc_unres_lock, &filter);
2509		return skb->len ? : err;
2510	}
2511
2512	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
2513				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
2514}