net/ipv6/ip6mr.c (Linux v3.1)
   1/*
   2 *	Linux IPv6 multicast routing support for BSD pim6sd
   3 *	Based on net/ipv4/ipmr.c.
   4 *
   5 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
   6 *		LSIIT Laboratory, Strasbourg, France
   7 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
   8 *		6WIND, Paris, France
   9 *	Copyright (C)2007,2008 USAGI/WIDE Project
  10 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  11 *
  12 *	This program is free software; you can redistribute it and/or
  13 *	modify it under the terms of the GNU General Public License
  14 *	as published by the Free Software Foundation; either version
  15 *	2 of the License, or (at your option) any later version.
  16 *
  17 */
  18
  19#include <asm/system.h>
  20#include <asm/uaccess.h>
  21#include <linux/types.h>
  22#include <linux/sched.h>
  23#include <linux/errno.h>
  24#include <linux/timer.h>
  25#include <linux/mm.h>
  26#include <linux/kernel.h>
  27#include <linux/fcntl.h>
  28#include <linux/stat.h>
  29#include <linux/socket.h>
  30#include <linux/inet.h>
  31#include <linux/netdevice.h>
  32#include <linux/inetdevice.h>
  33#include <linux/proc_fs.h>
  34#include <linux/seq_file.h>
  35#include <linux/init.h>
  36#include <linux/slab.h>
  37#include <linux/compat.h>
  38#include <net/protocol.h>
  39#include <linux/skbuff.h>
  40#include <net/sock.h>
  41#include <net/raw.h>
  42#include <linux/notifier.h>
  43#include <linux/if_arp.h>
  44#include <net/checksum.h>
  45#include <net/netlink.h>
  46#include <net/fib_rules.h>
  47
  48#include <net/ipv6.h>
  49#include <net/ip6_route.h>
  50#include <linux/mroute6.h>
  51#include <linux/pim.h>
  52#include <net/addrconf.h>
  53#include <linux/netfilter_ipv6.h>
  54#include <net/ip6_checksum.h>
  55
  56struct mr6_table {
  57	struct list_head	list;
  58#ifdef CONFIG_NET_NS
  59	struct net		*net;
  60#endif
  61	u32			id;
  62	struct sock		*mroute6_sk;
  63	struct timer_list	ipmr_expire_timer;
  64	struct list_head	mfc6_unres_queue;
  65	struct list_head	mfc6_cache_array[MFC6_LINES];
  66	struct mif_device	vif6_table[MAXMIFS];
  67	int			maxvif;
  68	atomic_t		cache_resolve_queue_len;
  69	int			mroute_do_assert;
  70	int			mroute_do_pim;
  71#ifdef CONFIG_IPV6_PIMSM_V2
  72	int			mroute_reg_vif_num;
  73#endif
  74};
  75
  76struct ip6mr_rule {
  77	struct fib_rule		common;
  78};
  79
  80struct ip6mr_result {
  81	struct mr6_table	*mrt;
  82};
  83
   84/* Big lock, protecting the vif table, mrt cache and mroute socket state.
   85   Note that changes are serialized via rtnl_lock.
   86 */
  87
  88static DEFINE_RWLOCK(mrt_lock);
  89
  90/*
  91 *	Multicast router control variables
  92 */
  93
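/* A MIF slot is live iff its dev pointer is non-NULL; the pointer is
 * updated under write_lock_bh(&mrt_lock) and tested by readers holding
 * mrt_lock (or the RTNL on the configuration paths).
 */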
  94#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
  95
  96/* Special spinlock for queue of unresolved entries */
  97static DEFINE_SPINLOCK(mfc_unres_lock);
  98
   99/* We return to Alan's original scheme. The hash table of resolved
  100   entries is changed only in process context and protected
  101   with the weak rwlock mrt_lock. The queue of unresolved entries
  102   is protected with the strong spinlock mfc_unres_lock.
  103
  104   With this split the data path is entirely free of exclusive locks.
  105 */
 106
 107static struct kmem_cache *mrt_cachep __read_mostly;
 108
 109static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
 110static void ip6mr_free_table(struct mr6_table *mrt);
 111
 112static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 113			  struct sk_buff *skb, struct mfc6_cache *cache);
 114static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
 115			      mifi_t mifi, int assert);
 116static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 117			       struct mfc6_cache *c, struct rtmsg *rtm);
 118static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 119			       struct netlink_callback *cb);
 120static void mroute_clean_tables(struct mr6_table *mrt);
 121static void ipmr_expire_process(unsigned long arg);
 122
 123#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 124#define ip6mr_for_each_table(mrt, net) \
 125	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 126
 127static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 128{
 129	struct mr6_table *mrt;
 130
 131	ip6mr_for_each_table(mrt, net) {
 132		if (mrt->id == id)
 133			return mrt;
 134	}
 135	return NULL;
 136}
 137
 138static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 139			    struct mr6_table **mrt)
 140{
 141	struct ip6mr_result res;
 142	struct fib_lookup_arg arg = { .result = &res, };
 143	int err;
 144
 145	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 146			       flowi6_to_flowi(flp6), 0, &arg);
 147	if (err < 0)
 148		return err;
 149	*mrt = res.mrt;
 150	return 0;
 151}
 152
 153static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 154			     int flags, struct fib_lookup_arg *arg)
 155{
 156	struct ip6mr_result *res = arg->result;
 157	struct mr6_table *mrt;
 158
 159	switch (rule->action) {
 160	case FR_ACT_TO_TBL:
 161		break;
 162	case FR_ACT_UNREACHABLE:
 163		return -ENETUNREACH;
 164	case FR_ACT_PROHIBIT:
 165		return -EACCES;
 166	case FR_ACT_BLACKHOLE:
 167	default:
 168		return -EINVAL;
 169	}
 170
 171	mrt = ip6mr_get_table(rule->fr_net, rule->table);
 172	if (mrt == NULL)
 173		return -EAGAIN;
 174	res->mrt = mrt;
 175	return 0;
 176}
 177
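/* ip6mr rules carry no selectors of their own: every rule matches, and
 * only the rule's action/table decides which mr6_table is used.
 */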
 178static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
 179{
 180	return 1;
 181}
 182
 183static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
 184	FRA_GENERIC_POLICY,
 185};
 186
 187static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 188				struct fib_rule_hdr *frh, struct nlattr **tb)
 189{
 190	return 0;
 191}
 192
 193static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 194			      struct nlattr **tb)
 195{
 196	return 1;
 197}
 198
 199static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 200			   struct fib_rule_hdr *frh)
 201{
 202	frh->dst_len = 0;
 203	frh->src_len = 0;
 204	frh->tos     = 0;
 205	return 0;
 206}
 207
 208static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
 209	.family		= RTNL_FAMILY_IP6MR,
 210	.rule_size	= sizeof(struct ip6mr_rule),
 211	.addr_size	= sizeof(struct in6_addr),
 212	.action		= ip6mr_rule_action,
 213	.match		= ip6mr_rule_match,
 214	.configure	= ip6mr_rule_configure,
 215	.compare	= ip6mr_rule_compare,
 216	.default_pref	= fib_default_rule_pref,
 217	.fill		= ip6mr_rule_fill,
 218	.nlgroup	= RTNLGRP_IPV6_RULE,
 219	.policy		= ip6mr_rule_policy,
 220	.owner		= THIS_MODULE,
 221};
 222
 223static int __net_init ip6mr_rules_init(struct net *net)
 224{
 225	struct fib_rules_ops *ops;
 226	struct mr6_table *mrt;
 227	int err;
 228
 229	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
 230	if (IS_ERR(ops))
 231		return PTR_ERR(ops);
 232
 233	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 234
 235	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 236	if (mrt == NULL) {
 237		err = -ENOMEM;
 238		goto err1;
 239	}
 240
 241	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
 242	if (err < 0)
 243		goto err2;
 244
 245	net->ipv6.mr6_rules_ops = ops;
 246	return 0;
 247
 248err2:
 249	kfree(mrt);
 250err1:
 251	fib_rules_unregister(ops);
 252	return err;
 253}
 254
 255static void __net_exit ip6mr_rules_exit(struct net *net)
 256{
 257	struct mr6_table *mrt, *next;
 258
 259	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 260		list_del(&mrt->list);
 261		ip6mr_free_table(mrt);
 262	}
 263	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 264}
 265#else
 266#define ip6mr_for_each_table(mrt, net) \
 267	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 268
 269static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 270{
 271	return net->ipv6.mrt6;
 272}
 273
 274static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 275			    struct mr6_table **mrt)
 276{
 277	*mrt = net->ipv6.mrt6;
 278	return 0;
 279}
 280
 281static int __net_init ip6mr_rules_init(struct net *net)
 282{
 283	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
 284	return net->ipv6.mrt6 ? 0 : -ENOMEM;
 285}
 286
 287static void __net_exit ip6mr_rules_exit(struct net *net)
 288{
 289	ip6mr_free_table(net->ipv6.mrt6);
 290}
 291#endif
 292
 293static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 294{
 295	struct mr6_table *mrt;
 296	unsigned int i;
 297
 298	mrt = ip6mr_get_table(net, id);
 299	if (mrt != NULL)
 300		return mrt;
 301
 302	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 303	if (mrt == NULL)
 304		return NULL;
 305	mrt->id = id;
 306	write_pnet(&mrt->net, net);
 307
 308	/* Forwarding cache */
 309	for (i = 0; i < MFC6_LINES; i++)
 310		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
 311
 312	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 313
 314	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
 315		    (unsigned long)mrt);
 316
 317#ifdef CONFIG_IPV6_PIMSM_V2
 318	mrt->mroute_reg_vif_num = -1;
 319#endif
 320#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 321	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
 322#endif
 323	return mrt;
 324}
 325
 326static void ip6mr_free_table(struct mr6_table *mrt)
 327{
 328	del_timer(&mrt->ipmr_expire_timer);
 329	mroute_clean_tables(mrt);
 330	kfree(mrt);
 331}
 332
 333#ifdef CONFIG_PROC_FS
 334
 335struct ipmr_mfc_iter {
 336	struct seq_net_private p;
 337	struct mr6_table *mrt;
 338	struct list_head *cache;
 339	int ct;
 340};
 341
 342
 343static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 344					   struct ipmr_mfc_iter *it, loff_t pos)
 345{
 346	struct mr6_table *mrt = it->mrt;
 347	struct mfc6_cache *mfc;
 348
 349	read_lock(&mrt_lock);
 350	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
 351		it->cache = &mrt->mfc6_cache_array[it->ct];
 352		list_for_each_entry(mfc, it->cache, list)
 353			if (pos-- == 0)
 354				return mfc;
 355	}
 356	read_unlock(&mrt_lock);
 357
 358	spin_lock_bh(&mfc_unres_lock);
 359	it->cache = &mrt->mfc6_unres_queue;
 360	list_for_each_entry(mfc, it->cache, list)
 361		if (pos-- == 0)
 362			return mfc;
 363	spin_unlock_bh(&mfc_unres_lock);
 364
 365	it->cache = NULL;
 366	return NULL;
 367}
 368
 369/*
  370 *	The /proc interfaces to multicast routing: /proc/ip6_mr_cache and /proc/ip6_mr_vif
 371 */
 372
 373struct ipmr_vif_iter {
 374	struct seq_net_private p;
 375	struct mr6_table *mrt;
 376	int ct;
 377};
 378
 379static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
 380					    struct ipmr_vif_iter *iter,
 381					    loff_t pos)
 382{
 383	struct mr6_table *mrt = iter->mrt;
 384
 385	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
 386		if (!MIF_EXISTS(mrt, iter->ct))
 387			continue;
 388		if (pos-- == 0)
 389			return &mrt->vif6_table[iter->ct];
 390	}
 391	return NULL;
 392}
 393
 394static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 395	__acquires(mrt_lock)
 396{
 397	struct ipmr_vif_iter *iter = seq->private;
 398	struct net *net = seq_file_net(seq);
 399	struct mr6_table *mrt;
 400
 401	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 402	if (mrt == NULL)
 403		return ERR_PTR(-ENOENT);
 404
 405	iter->mrt = mrt;
 406
 407	read_lock(&mrt_lock);
 408	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
 409		: SEQ_START_TOKEN;
 410}
 411
 412static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 413{
 414	struct ipmr_vif_iter *iter = seq->private;
 415	struct net *net = seq_file_net(seq);
 416	struct mr6_table *mrt = iter->mrt;
 417
 418	++*pos;
 419	if (v == SEQ_START_TOKEN)
 420		return ip6mr_vif_seq_idx(net, iter, 0);
 421
 422	while (++iter->ct < mrt->maxvif) {
 423		if (!MIF_EXISTS(mrt, iter->ct))
 424			continue;
 425		return &mrt->vif6_table[iter->ct];
 426	}
 427	return NULL;
 428}
 429
 430static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 431	__releases(mrt_lock)
 432{
 433	read_unlock(&mrt_lock);
 434}
 435
 436static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 437{
 438	struct ipmr_vif_iter *iter = seq->private;
 439	struct mr6_table *mrt = iter->mrt;
 440
 441	if (v == SEQ_START_TOKEN) {
 442		seq_puts(seq,
 443			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
 444	} else {
 445		const struct mif_device *vif = v;
 446		const char *name = vif->dev ? vif->dev->name : "none";
 447
 448		seq_printf(seq,
 449			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
 450			   vif - mrt->vif6_table,
 451			   name, vif->bytes_in, vif->pkt_in,
 452			   vif->bytes_out, vif->pkt_out,
 453			   vif->flags);
 454	}
 455	return 0;
 456}
 457
 458static const struct seq_operations ip6mr_vif_seq_ops = {
 459	.start = ip6mr_vif_seq_start,
 460	.next  = ip6mr_vif_seq_next,
 461	.stop  = ip6mr_vif_seq_stop,
 462	.show  = ip6mr_vif_seq_show,
 463};
 464
 465static int ip6mr_vif_open(struct inode *inode, struct file *file)
 466{
 467	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
 468			    sizeof(struct ipmr_vif_iter));
 469}
 470
 471static const struct file_operations ip6mr_vif_fops = {
 472	.owner	 = THIS_MODULE,
 473	.open    = ip6mr_vif_open,
 474	.read    = seq_read,
 475	.llseek  = seq_lseek,
 476	.release = seq_release_net,
 477};
 478
 479static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 480{
 481	struct ipmr_mfc_iter *it = seq->private;
 482	struct net *net = seq_file_net(seq);
 483	struct mr6_table *mrt;
 484
 485	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 486	if (mrt == NULL)
 487		return ERR_PTR(-ENOENT);
 488
 489	it->mrt = mrt;
 490	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
 491		: SEQ_START_TOKEN;
 492}
 493
 494static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 495{
 496	struct mfc6_cache *mfc = v;
 497	struct ipmr_mfc_iter *it = seq->private;
 498	struct net *net = seq_file_net(seq);
 499	struct mr6_table *mrt = it->mrt;
 500
 501	++*pos;
 502
 503	if (v == SEQ_START_TOKEN)
 504		return ipmr_mfc_seq_idx(net, seq->private, 0);
 505
 506	if (mfc->list.next != it->cache)
 507		return list_entry(mfc->list.next, struct mfc6_cache, list);
 508
 509	if (it->cache == &mrt->mfc6_unres_queue)
 510		goto end_of_list;
 511
 512	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
 513
 514	while (++it->ct < MFC6_LINES) {
 515		it->cache = &mrt->mfc6_cache_array[it->ct];
 516		if (list_empty(it->cache))
 517			continue;
 518		return list_first_entry(it->cache, struct mfc6_cache, list);
 519	}
 520
 521	/* exhausted cache_array, show unresolved */
 522	read_unlock(&mrt_lock);
 523	it->cache = &mrt->mfc6_unres_queue;
 524	it->ct = 0;
 525
 526	spin_lock_bh(&mfc_unres_lock);
 527	if (!list_empty(it->cache))
 528		return list_first_entry(it->cache, struct mfc6_cache, list);
 529
 530 end_of_list:
 531	spin_unlock_bh(&mfc_unres_lock);
 532	it->cache = NULL;
 533
 534	return NULL;
 535}
 536
 537static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 538{
 539	struct ipmr_mfc_iter *it = seq->private;
 540	struct mr6_table *mrt = it->mrt;
 541
 542	if (it->cache == &mrt->mfc6_unres_queue)
 543		spin_unlock_bh(&mfc_unres_lock);
 544	else if (it->cache == mrt->mfc6_cache_array)
 545		read_unlock(&mrt_lock);
 546}
 547
 548static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 549{
 550	int n;
 551
 552	if (v == SEQ_START_TOKEN) {
 553		seq_puts(seq,
 554			 "Group                            "
 555			 "Origin                           "
 556			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
 557	} else {
 558		const struct mfc6_cache *mfc = v;
 559		const struct ipmr_mfc_iter *it = seq->private;
 560		struct mr6_table *mrt = it->mrt;
 561
 562		seq_printf(seq, "%pI6 %pI6 %-3hd",
 563			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 564			   mfc->mf6c_parent);
 565
 566		if (it->cache != &mrt->mfc6_unres_queue) {
 567			seq_printf(seq, " %8lu %8lu %8lu",
 568				   mfc->mfc_un.res.pkt,
 569				   mfc->mfc_un.res.bytes,
 570				   mfc->mfc_un.res.wrong_if);
 571			for (n = mfc->mfc_un.res.minvif;
 572			     n < mfc->mfc_un.res.maxvif; n++) {
 573				if (MIF_EXISTS(mrt, n) &&
 574				    mfc->mfc_un.res.ttls[n] < 255)
 575					seq_printf(seq,
 576						   " %2d:%-3d",
 577						   n, mfc->mfc_un.res.ttls[n]);
 578			}
 579		} else {
 580			/* unresolved mfc_caches don't contain
 581			 * pkt, bytes and wrong_if values
 582			 */
 583			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
 584		}
 585		seq_putc(seq, '\n');
 586	}
 587	return 0;
 588}
 589
 590static const struct seq_operations ipmr_mfc_seq_ops = {
 591	.start = ipmr_mfc_seq_start,
 592	.next  = ipmr_mfc_seq_next,
 593	.stop  = ipmr_mfc_seq_stop,
 594	.show  = ipmr_mfc_seq_show,
 595};
 596
 597static int ipmr_mfc_open(struct inode *inode, struct file *file)
 598{
 599	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
 600			    sizeof(struct ipmr_mfc_iter));
 601}
 602
 603static const struct file_operations ip6mr_mfc_fops = {
 604	.owner	 = THIS_MODULE,
 605	.open    = ipmr_mfc_open,
 606	.read    = seq_read,
 607	.llseek  = seq_lseek,
 608	.release = seq_release_net,
 609};
 610#endif
 611
 612#ifdef CONFIG_IPV6_PIMSM_V2
 613
 614static int pim6_rcv(struct sk_buff *skb)
 615{
 616	struct pimreghdr *pim;
 617	struct ipv6hdr   *encap;
 618	struct net_device  *reg_dev = NULL;
 619	struct net *net = dev_net(skb->dev);
 620	struct mr6_table *mrt;
 621	struct flowi6 fl6 = {
 622		.flowi6_iif	= skb->dev->ifindex,
 623		.flowi6_mark	= skb->mark,
 624	};
 625	int reg_vif_num;
 626
 627	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 628		goto drop;
 629
 630	pim = (struct pimreghdr *)skb_transport_header(skb);
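	/* Accept only PIMv2 Register messages that are not Null-Registers,
	 * and require a valid checksum over either the 8-byte register
	 * header alone or over the whole packet.
	 */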
 631	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
 632	    (pim->flags & PIM_NULL_REGISTER) ||
 633	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 634			     sizeof(*pim), IPPROTO_PIM,
 635			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 636	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 637		goto drop;
 638
  639	/* check that the inner packet is destined to a multicast group */
 640	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
 641				   sizeof(*pim));
 642
 643	if (!ipv6_addr_is_multicast(&encap->daddr) ||
 644	    encap->payload_len == 0 ||
 645	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
 646		goto drop;
 647
 648	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 649		goto drop;
 650	reg_vif_num = mrt->mroute_reg_vif_num;
 651
 652	read_lock(&mrt_lock);
 653	if (reg_vif_num >= 0)
 654		reg_dev = mrt->vif6_table[reg_vif_num].dev;
 655	if (reg_dev)
 656		dev_hold(reg_dev);
 657	read_unlock(&mrt_lock);
 658
 659	if (reg_dev == NULL)
 660		goto drop;
 661
 662	skb->mac_header = skb->network_header;
 663	skb_pull(skb, (u8 *)encap - skb->data);
 664	skb_reset_network_header(skb);
 665	skb->protocol = htons(ETH_P_IPV6);
 666	skb->ip_summed = CHECKSUM_NONE;
 667	skb->pkt_type = PACKET_HOST;
 668
 669	skb_tunnel_rx(skb, reg_dev);
 670
 671	netif_rx(skb);
 672
 673	dev_put(reg_dev);
 674	return 0;
 675 drop:
 676	kfree_skb(skb);
 677	return 0;
 678}
 679
 680static const struct inet6_protocol pim6_protocol = {
 681	.handler	=	pim6_rcv,
 682};
 683
 684/* Service routines creating virtual interfaces: PIMREG */
 685
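/* Anything the stack transmits on a pim6reg device is not sent on the
 * wire: it is bounced to the daemon as an MRT6MSG_WHOLEPKT report and
 * then freed.
 */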
 686static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 687				      struct net_device *dev)
 688{
 689	struct net *net = dev_net(dev);
 690	struct mr6_table *mrt;
 691	struct flowi6 fl6 = {
 692		.flowi6_oif	= dev->ifindex,
 693		.flowi6_iif	= skb->skb_iif,
 694		.flowi6_mark	= skb->mark,
 695	};
 696	int err;
 697
 698	err = ip6mr_fib_lookup(net, &fl6, &mrt);
 699	if (err < 0) {
 700		kfree_skb(skb);
 701		return err;
 702	}
 703
 704	read_lock(&mrt_lock);
 705	dev->stats.tx_bytes += skb->len;
 706	dev->stats.tx_packets++;
 707	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
 708	read_unlock(&mrt_lock);
 709	kfree_skb(skb);
 710	return NETDEV_TX_OK;
 711}
 712
 713static const struct net_device_ops reg_vif_netdev_ops = {
 714	.ndo_start_xmit	= reg_vif_xmit,
 715};
 716
 717static void reg_vif_setup(struct net_device *dev)
 718{
 719	dev->type		= ARPHRD_PIMREG;
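	/* Ethernet MTU minus the outer IPv6 header and the 8-byte PIM
	 * Register header used to encapsulate forwarded packets.
	 */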
 720	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 721	dev->flags		= IFF_NOARP;
 722	dev->netdev_ops		= &reg_vif_netdev_ops;
 723	dev->destructor		= free_netdev;
 724	dev->features		|= NETIF_F_NETNS_LOCAL;
 725}
 726
 727static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 728{
 729	struct net_device *dev;
 730	char name[IFNAMSIZ];
 731
 732	if (mrt->id == RT6_TABLE_DFLT)
 733		sprintf(name, "pim6reg");
 734	else
 735		sprintf(name, "pim6reg%u", mrt->id);
 736
 737	dev = alloc_netdev(0, name, reg_vif_setup);
 738	if (dev == NULL)
 739		return NULL;
 740
 741	dev_net_set(dev, net);
 742
 743	if (register_netdevice(dev)) {
 744		free_netdev(dev);
 745		return NULL;
 746	}
 747	dev->iflink = 0;
 748
 749	if (dev_open(dev))
 750		goto failure;
 751
 752	dev_hold(dev);
 753	return dev;
 754
 755failure:
 756	/* allow the register to be completed before unregistering. */
 757	rtnl_unlock();
 758	rtnl_lock();
 759
 760	unregister_netdevice(dev);
 761	return NULL;
 762}
 763#endif
 764
 765/*
 766 *	Delete a VIF entry
 767 */
 768
 769static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 770{
 771	struct mif_device *v;
 772	struct net_device *dev;
 773	struct inet6_dev *in6_dev;
 774
 775	if (vifi < 0 || vifi >= mrt->maxvif)
 776		return -EADDRNOTAVAIL;
 777
 778	v = &mrt->vif6_table[vifi];
 779
 780	write_lock_bh(&mrt_lock);
 781	dev = v->dev;
 782	v->dev = NULL;
 783
 784	if (!dev) {
 785		write_unlock_bh(&mrt_lock);
 786		return -EADDRNOTAVAIL;
 787	}
 788
 789#ifdef CONFIG_IPV6_PIMSM_V2
 790	if (vifi == mrt->mroute_reg_vif_num)
 791		mrt->mroute_reg_vif_num = -1;
 792#endif
 793
 794	if (vifi + 1 == mrt->maxvif) {
 795		int tmp;
 796		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 797			if (MIF_EXISTS(mrt, tmp))
 798				break;
 799		}
 800		mrt->maxvif = tmp + 1;
 801	}
 802
 803	write_unlock_bh(&mrt_lock);
 804
 805	dev_set_allmulti(dev, -1);
 806
 807	in6_dev = __in6_dev_get(dev);
 808	if (in6_dev)
 809		in6_dev->cnf.mc_forwarding--;
 810
 811	if (v->flags & MIFF_REGISTER)
 812		unregister_netdevice_queue(dev, head);
 813
 814	dev_put(dev);
 815	return 0;
 816}
 817
 818static inline void ip6mr_cache_free(struct mfc6_cache *c)
 819{
 820	kmem_cache_free(mrt_cachep, c);
 821}
 822
 823/* Destroy an unresolved cache entry, killing queued skbs
 824   and reporting error to netlink readers.
 825 */
 826
 827static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
 828{
 829	struct net *net = read_pnet(&mrt->net);
 830	struct sk_buff *skb;
 831
 832	atomic_dec(&mrt->cache_resolve_queue_len);
 833
 834	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
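		/* Queued skbs are either real packets or netlink request
		 * skbs built by ip6mr_get_route(); the latter carry a fake
		 * IPv6 header with version == 0 and get an error reply
		 * instead of being silently dropped.
		 */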
 835		if (ipv6_hdr(skb)->version == 0) {
 836			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 837			nlh->nlmsg_type = NLMSG_ERROR;
 838			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
 839			skb_trim(skb, nlh->nlmsg_len);
 840			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
 841			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 842		} else
 843			kfree_skb(skb);
 844	}
 845
 846	ip6mr_cache_free(c);
 847}
 848
 849
  850/* Timer handler for the queue of unresolved entries. */
 851
 852static void ipmr_do_expire_process(struct mr6_table *mrt)
 853{
 854	unsigned long now = jiffies;
 855	unsigned long expires = 10 * HZ;
 856	struct mfc6_cache *c, *next;
 857
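	/* Walk the unresolved queue: entries whose 10 second window has
	 * passed are destroyed, and the soonest remaining expiry is
	 * remembered so the timer is re-armed no later than needed.
	 */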
 858	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
 859		if (time_after(c->mfc_un.unres.expires, now)) {
 860			/* not yet... */
 861			unsigned long interval = c->mfc_un.unres.expires - now;
 862			if (interval < expires)
 863				expires = interval;
 864			continue;
 865		}
 866
 867		list_del(&c->list);
 868		ip6mr_destroy_unres(mrt, c);
 869	}
 870
 871	if (!list_empty(&mrt->mfc6_unres_queue))
 872		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 873}
 874
 875static void ipmr_expire_process(unsigned long arg)
 876{
 877	struct mr6_table *mrt = (struct mr6_table *)arg;
 878
 879	if (!spin_trylock(&mfc_unres_lock)) {
 880		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 881		return;
 882	}
 883
 884	if (!list_empty(&mrt->mfc6_unres_queue))
 885		ipmr_do_expire_process(mrt);
 886
 887	spin_unlock(&mfc_unres_lock);
 888}
 889
 890/* Fill oifs list. It is called under write locked mrt_lock. */
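/* ttls[] uses 255 for "do not forward"; any smaller value is a threshold:
 * a packet is sent out of that MIF only when its hop limit exceeds it.
 * minvif/maxvif bound the range scanned on the forwarding fast path.
 */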
 891
 892static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
 893				    unsigned char *ttls)
 894{
 895	int vifi;
 896
 897	cache->mfc_un.res.minvif = MAXMIFS;
 898	cache->mfc_un.res.maxvif = 0;
 899	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 900
 901	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
 902		if (MIF_EXISTS(mrt, vifi) &&
 903		    ttls[vifi] && ttls[vifi] < 255) {
 904			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 905			if (cache->mfc_un.res.minvif > vifi)
 906				cache->mfc_un.res.minvif = vifi;
 907			if (cache->mfc_un.res.maxvif <= vifi)
 908				cache->mfc_un.res.maxvif = vifi + 1;
 909		}
 910	}
 911}
 912
 913static int mif6_add(struct net *net, struct mr6_table *mrt,
 914		    struct mif6ctl *vifc, int mrtsock)
 915{
 916	int vifi = vifc->mif6c_mifi;
 917	struct mif_device *v = &mrt->vif6_table[vifi];
 918	struct net_device *dev;
 919	struct inet6_dev *in6_dev;
 920	int err;
 921
  922	/* Is vif busy? */
 923	if (MIF_EXISTS(mrt, vifi))
 924		return -EADDRINUSE;
 925
 926	switch (vifc->mif6c_flags) {
 927#ifdef CONFIG_IPV6_PIMSM_V2
 928	case MIFF_REGISTER:
 929		/*
 930		 * Special Purpose VIF in PIM
 931		 * All the packets will be sent to the daemon
 932		 */
 933		if (mrt->mroute_reg_vif_num >= 0)
 934			return -EADDRINUSE;
 935		dev = ip6mr_reg_vif(net, mrt);
 936		if (!dev)
 937			return -ENOBUFS;
 938		err = dev_set_allmulti(dev, 1);
 939		if (err) {
 940			unregister_netdevice(dev);
 941			dev_put(dev);
 942			return err;
 943		}
 944		break;
 945#endif
 946	case 0:
 947		dev = dev_get_by_index(net, vifc->mif6c_pifi);
 948		if (!dev)
 949			return -EADDRNOTAVAIL;
 950		err = dev_set_allmulti(dev, 1);
 951		if (err) {
 952			dev_put(dev);
 953			return err;
 954		}
 955		break;
 956	default:
 957		return -EINVAL;
 958	}
 959
 960	in6_dev = __in6_dev_get(dev);
 961	if (in6_dev)
 962		in6_dev->cnf.mc_forwarding++;
 963
 964	/*
 965	 *	Fill in the VIF structures
 966	 */
 967	v->rate_limit = vifc->vifc_rate_limit;
 968	v->flags = vifc->mif6c_flags;
 969	if (!mrtsock)
 970		v->flags |= VIFF_STATIC;
 971	v->threshold = vifc->vifc_threshold;
 972	v->bytes_in = 0;
 973	v->bytes_out = 0;
 974	v->pkt_in = 0;
 975	v->pkt_out = 0;
 976	v->link = dev->ifindex;
 977	if (v->flags & MIFF_REGISTER)
 978		v->link = dev->iflink;
 979
 980	/* And finish update writing critical data */
 981	write_lock_bh(&mrt_lock);
 982	v->dev = dev;
 983#ifdef CONFIG_IPV6_PIMSM_V2
 984	if (v->flags & MIFF_REGISTER)
 985		mrt->mroute_reg_vif_num = vifi;
 986#endif
 987	if (vifi + 1 > mrt->maxvif)
 988		mrt->maxvif = vifi + 1;
 989	write_unlock_bh(&mrt_lock);
 990	return 0;
 991}
 992
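/* Look up a resolved (S,G) entry; the hash is over (mcastgrp, origin)
 * and callers must hold mrt_lock across the list walk.
 */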
 993static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
 994					   const struct in6_addr *origin,
 995					   const struct in6_addr *mcastgrp)
 996{
 997	int line = MFC6_HASH(mcastgrp, origin);
 998	struct mfc6_cache *c;
 999
1000	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1001		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1002		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1003			return c;
1004	}
1005	return NULL;
1006}
1007
1008/*
1009 *	Allocate a multicast cache entry
1010 */
1011static struct mfc6_cache *ip6mr_cache_alloc(void)
1012{
1013	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1014	if (c == NULL)
1015		return NULL;
1016	c->mfc_un.res.minvif = MAXMIFS;
1017	return c;
1018}
1019
1020static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1021{
1022	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1023	if (c == NULL)
1024		return NULL;
1025	skb_queue_head_init(&c->mfc_un.unres.unresolved);
1026	c->mfc_un.unres.expires = jiffies + 10 * HZ;
1027	return c;
1028}
1029
1030/*
 1031 *	A cache entry has moved from the unresolved queue to the resolved state
1032 */
1033
1034static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1035				struct mfc6_cache *uc, struct mfc6_cache *c)
1036{
1037	struct sk_buff *skb;
1038
1039	/*
1040	 *	Play the pending entries through our router
1041	 */
1042
1043	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1044		if (ipv6_hdr(skb)->version == 0) {
1045			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1046
1047			if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
1048				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1049			} else {
1050				nlh->nlmsg_type = NLMSG_ERROR;
1051				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
1052				skb_trim(skb, nlh->nlmsg_len);
1053				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
1054			}
1055			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
1056		} else
1057			ip6_mr_forward(net, mrt, skb, c);
1058	}
1059}
1060
1061/*
1062 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1063 *	expects the following bizarre scheme.
1064 *
1065 *	Called under mrt_lock.
1066 */
1067
1068static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1069			      mifi_t mifi, int assert)
1070{
1071	struct sk_buff *skb;
1072	struct mrt6msg *msg;
1073	int ret;
1074
1075#ifdef CONFIG_IPV6_PIMSM_V2
1076	if (assert == MRT6MSG_WHOLEPKT)
1077		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1078						+sizeof(*msg));
1079	else
1080#endif
1081		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1082
1083	if (!skb)
1084		return -ENOBUFS;
1085
1086	/* I suppose that internal messages
1087	 * do not require checksums */
1088
1089	skb->ip_summed = CHECKSUM_UNNECESSARY;
1090
1091#ifdef CONFIG_IPV6_PIMSM_V2
1092	if (assert == MRT6MSG_WHOLEPKT) {
1093		/* Ugly, but we have no choice with this interface.
1094		   Duplicate old header, fix length etc.
1095		   And all this only to mangle msg->im6_msgtype and
1096		   to set msg->im6_mbz to "mbz" :-)
1097		 */
1098		skb_push(skb, -skb_network_offset(pkt));
1099
1100		skb_push(skb, sizeof(*msg));
1101		skb_reset_transport_header(skb);
1102		msg = (struct mrt6msg *)skb_transport_header(skb);
1103		msg->im6_mbz = 0;
1104		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1105		msg->im6_mif = mrt->mroute_reg_vif_num;
1106		msg->im6_pad = 0;
1107		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1108		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1109
1110		skb->ip_summed = CHECKSUM_UNNECESSARY;
1111	} else
1112#endif
1113	{
1114	/*
1115	 *	Copy the IP header
1116	 */
1117
1118	skb_put(skb, sizeof(struct ipv6hdr));
1119	skb_reset_network_header(skb);
1120	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1121
1122	/*
1123	 *	Add our header
1124	 */
1125	skb_put(skb, sizeof(*msg));
1126	skb_reset_transport_header(skb);
1127	msg = (struct mrt6msg *)skb_transport_header(skb);
1128
1129	msg->im6_mbz = 0;
1130	msg->im6_msgtype = assert;
1131	msg->im6_mif = mifi;
1132	msg->im6_pad = 0;
1133	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1134	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1135
1136	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1137	skb->ip_summed = CHECKSUM_UNNECESSARY;
1138	}
1139
1140	if (mrt->mroute6_sk == NULL) {
1141		kfree_skb(skb);
1142		return -EINVAL;
1143	}
1144
1145	/*
1146	 *	Deliver to user space multicast routing algorithms
1147	 */
1148	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1149	if (ret < 0) {
1150		if (net_ratelimit())
1151			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
1152		kfree_skb(skb);
1153	}
1154
1155	return ret;
1156}
1157
1158/*
 1159 *	Queue a packet for resolution. The cache entry is found or created under mfc_unres_lock.
1160 */
1161
1162static int
1163ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1164{
1165	bool found = false;
1166	int err;
1167	struct mfc6_cache *c;
1168
1169	spin_lock_bh(&mfc_unres_lock);
1170	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1171		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1172		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1173			found = true;
1174			break;
1175		}
1176	}
1177
1178	if (!found) {
1179		/*
1180		 *	Create a new entry if allowable
1181		 */
1182
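		/* Cap the unresolved queue at 10 entries so that a burst
		 * of traffic for unknown groups cannot pin unbounded
		 * memory while the daemon is slow to install routes.
		 */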
1183		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1184		    (c = ip6mr_cache_alloc_unres()) == NULL) {
1185			spin_unlock_bh(&mfc_unres_lock);
1186
1187			kfree_skb(skb);
1188			return -ENOBUFS;
1189		}
1190
1191		/*
1192		 *	Fill in the new cache entry
1193		 */
1194		c->mf6c_parent = -1;
1195		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1196		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1197
1198		/*
1199		 *	Reflect first query at pim6sd
1200		 */
1201		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1202		if (err < 0) {
1203			/* If the report failed throw the cache entry
1204			   out - Brad Parker
1205			 */
1206			spin_unlock_bh(&mfc_unres_lock);
1207
1208			ip6mr_cache_free(c);
1209			kfree_skb(skb);
1210			return err;
1211		}
1212
1213		atomic_inc(&mrt->cache_resolve_queue_len);
1214		list_add(&c->list, &mrt->mfc6_unres_queue);
1215
1216		ipmr_do_expire_process(mrt);
1217	}
1218
1219	/*
1220	 *	See if we can append the packet
1221	 */
1222	if (c->mfc_un.unres.unresolved.qlen > 3) {
1223		kfree_skb(skb);
1224		err = -ENOBUFS;
1225	} else {
1226		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1227		err = 0;
1228	}
1229
1230	spin_unlock_bh(&mfc_unres_lock);
1231	return err;
1232}
1233
1234/*
1235 *	MFC6 cache manipulation by user space
1236 */
1237
1238static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
1239{
1240	int line;
1241	struct mfc6_cache *c, *next;
1242
1243	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1244
1245	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1246		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1247		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1248			write_lock_bh(&mrt_lock);
1249			list_del(&c->list);
1250			write_unlock_bh(&mrt_lock);
1251
1252			ip6mr_cache_free(c);
1253			return 0;
1254		}
1255	}
1256	return -ENOENT;
1257}
1258
1259static int ip6mr_device_event(struct notifier_block *this,
1260			      unsigned long event, void *ptr)
1261{
1262	struct net_device *dev = ptr;
1263	struct net *net = dev_net(dev);
1264	struct mr6_table *mrt;
1265	struct mif_device *v;
1266	int ct;
1267	LIST_HEAD(list);
1268
1269	if (event != NETDEV_UNREGISTER)
1270		return NOTIFY_DONE;
1271
1272	ip6mr_for_each_table(mrt, net) {
1273		v = &mrt->vif6_table[0];
1274		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1275			if (v->dev == dev)
1276				mif6_delete(mrt, ct, &list);
1277		}
1278	}
1279	unregister_netdevice_many(&list);
1280
1281	return NOTIFY_DONE;
1282}
1283
1284static struct notifier_block ip6_mr_notifier = {
1285	.notifier_call = ip6mr_device_event
1286};
1287
1288/*
1289 *	Setup for IP multicast routing
1290 */
1291
1292static int __net_init ip6mr_net_init(struct net *net)
1293{
1294	int err;
1295
1296	err = ip6mr_rules_init(net);
1297	if (err < 0)
1298		goto fail;
1299
1300#ifdef CONFIG_PROC_FS
1301	err = -ENOMEM;
1302	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
1303		goto proc_vif_fail;
1304	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
1305		goto proc_cache_fail;
1306#endif
1307
1308	return 0;
1309
1310#ifdef CONFIG_PROC_FS
1311proc_cache_fail:
1312	proc_net_remove(net, "ip6_mr_vif");
1313proc_vif_fail:
1314	ip6mr_rules_exit(net);
1315#endif
1316fail:
1317	return err;
1318}
1319
1320static void __net_exit ip6mr_net_exit(struct net *net)
1321{
1322#ifdef CONFIG_PROC_FS
1323	proc_net_remove(net, "ip6_mr_cache");
1324	proc_net_remove(net, "ip6_mr_vif");
1325#endif
1326	ip6mr_rules_exit(net);
1327}
1328
1329static struct pernet_operations ip6mr_net_ops = {
1330	.init = ip6mr_net_init,
1331	.exit = ip6mr_net_exit,
1332};
1333
1334int __init ip6_mr_init(void)
1335{
1336	int err;
1337
1338	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1339				       sizeof(struct mfc6_cache),
1340				       0, SLAB_HWCACHE_ALIGN,
1341				       NULL);
1342	if (!mrt_cachep)
1343		return -ENOMEM;
1344
1345	err = register_pernet_subsys(&ip6mr_net_ops);
1346	if (err)
1347		goto reg_pernet_fail;
1348
1349	err = register_netdevice_notifier(&ip6_mr_notifier);
1350	if (err)
1351		goto reg_notif_fail;
1352#ifdef CONFIG_IPV6_PIMSM_V2
1353	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1354		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
1355		err = -EAGAIN;
1356		goto add_proto_fail;
1357	}
1358#endif
1359	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1360		      ip6mr_rtm_dumproute, NULL);
1361	return 0;
1362#ifdef CONFIG_IPV6_PIMSM_V2
1363add_proto_fail:
1364	unregister_netdevice_notifier(&ip6_mr_notifier);
1365#endif
1366reg_notif_fail:
1367	unregister_pernet_subsys(&ip6mr_net_ops);
1368reg_pernet_fail:
1369	kmem_cache_destroy(mrt_cachep);
1370	return err;
1371}
1372
1373void ip6_mr_cleanup(void)
1374{
1375	unregister_netdevice_notifier(&ip6_mr_notifier);
1376	unregister_pernet_subsys(&ip6mr_net_ops);
1377	kmem_cache_destroy(mrt_cachep);
1378}
1379
1380static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1381			 struct mf6cctl *mfc, int mrtsock)
1382{
1383	bool found = false;
1384	int line;
1385	struct mfc6_cache *uc, *c;
1386	unsigned char ttls[MAXMIFS];
1387	int i;
1388
1389	if (mfc->mf6cc_parent >= MAXMIFS)
1390		return -ENFILE;
1391
1392	memset(ttls, 255, MAXMIFS);
1393	for (i = 0; i < MAXMIFS; i++) {
1394		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1395			ttls[i] = 1;
1396
1397	}
1398
1399	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1400
1401	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1402		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1403		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1404			found = true;
1405			break;
1406		}
1407	}
1408
1409	if (found) {
1410		write_lock_bh(&mrt_lock);
1411		c->mf6c_parent = mfc->mf6cc_parent;
1412		ip6mr_update_thresholds(mrt, c, ttls);
1413		if (!mrtsock)
1414			c->mfc_flags |= MFC_STATIC;
1415		write_unlock_bh(&mrt_lock);
1416		return 0;
1417	}
1418
1419	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1420		return -EINVAL;
1421
1422	c = ip6mr_cache_alloc();
1423	if (c == NULL)
1424		return -ENOMEM;
1425
1426	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1427	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1428	c->mf6c_parent = mfc->mf6cc_parent;
1429	ip6mr_update_thresholds(mrt, c, ttls);
1430	if (!mrtsock)
1431		c->mfc_flags |= MFC_STATIC;
1432
1433	write_lock_bh(&mrt_lock);
1434	list_add(&c->list, &mrt->mfc6_cache_array[line]);
1435	write_unlock_bh(&mrt_lock);
1436
1437	/*
1438	 *	Check to see if we resolved a queued list. If so we
1439	 *	need to send on the frames and tidy up.
1440	 */
1441	found = false;
1442	spin_lock_bh(&mfc_unres_lock);
1443	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1444		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1445		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1446			list_del(&uc->list);
1447			atomic_dec(&mrt->cache_resolve_queue_len);
1448			found = true;
1449			break;
1450		}
1451	}
1452	if (list_empty(&mrt->mfc6_unres_queue))
1453		del_timer(&mrt->ipmr_expire_timer);
1454	spin_unlock_bh(&mfc_unres_lock);
1455
1456	if (found) {
1457		ip6mr_cache_resolve(net, mrt, uc, c);
1458		ip6mr_cache_free(uc);
1459	}
1460	return 0;
1461}
1462
1463/*
1464 *	Close the multicast socket, and clear the vif tables etc
1465 */
1466
1467static void mroute_clean_tables(struct mr6_table *mrt)
1468{
1469	int i;
1470	LIST_HEAD(list);
1471	struct mfc6_cache *c, *next;
1472
1473	/*
1474	 *	Shut down all active vif entries
1475	 */
1476	for (i = 0; i < mrt->maxvif; i++) {
1477		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1478			mif6_delete(mrt, i, &list);
1479	}
1480	unregister_netdevice_many(&list);
1481
1482	/*
1483	 *	Wipe the cache
1484	 */
1485	for (i = 0; i < MFC6_LINES; i++) {
1486		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1487			if (c->mfc_flags & MFC_STATIC)
1488				continue;
1489			write_lock_bh(&mrt_lock);
1490			list_del(&c->list);
1491			write_unlock_bh(&mrt_lock);
1492
1493			ip6mr_cache_free(c);
1494		}
1495	}
1496
1497	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1498		spin_lock_bh(&mfc_unres_lock);
1499		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1500			list_del(&c->list);
1501			ip6mr_destroy_unres(mrt, c);
1502		}
1503		spin_unlock_bh(&mfc_unres_lock);
1504	}
1505}
1506
1507static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1508{
1509	int err = 0;
1510	struct net *net = sock_net(sk);
1511
1512	rtnl_lock();
1513	write_lock_bh(&mrt_lock);
1514	if (likely(mrt->mroute6_sk == NULL)) {
1515		mrt->mroute6_sk = sk;
1516		net->ipv6.devconf_all->mc_forwarding++;
1517	}
1518	else
1519		err = -EADDRINUSE;
1520	write_unlock_bh(&mrt_lock);
1521
1522	rtnl_unlock();
1523
1524	return err;
1525}
1526
1527int ip6mr_sk_done(struct sock *sk)
1528{
1529	int err = -EACCES;
1530	struct net *net = sock_net(sk);
1531	struct mr6_table *mrt;
1532
1533	rtnl_lock();
1534	ip6mr_for_each_table(mrt, net) {
1535		if (sk == mrt->mroute6_sk) {
1536			write_lock_bh(&mrt_lock);
1537			mrt->mroute6_sk = NULL;
1538			net->ipv6.devconf_all->mc_forwarding--;
1539			write_unlock_bh(&mrt_lock);
1540
1541			mroute_clean_tables(mrt);
1542			err = 0;
1543			break;
1544		}
1545	}
1546	rtnl_unlock();
1547
1548	return err;
1549}
1550
1551struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1552{
1553	struct mr6_table *mrt;
1554	struct flowi6 fl6 = {
1555		.flowi6_iif	= skb->skb_iif,
1556		.flowi6_oif	= skb->dev->ifindex,
1557		.flowi6_mark	= skb->mark,
1558	};
1559
1560	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1561		return NULL;
1562
1563	return mrt->mroute6_sk;
1564}
1565
1566/*
1567 *	Socket options and virtual interface manipulation. The whole
1568 *	virtual interface system is a complete heap, but unfortunately
1569 *	that's how BSD mrouted happens to think. Maybe one day with a proper
1570 *	MOSPF/PIM router set up we can clean this up.
1571 */
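/* Userspace view (a sketch, not code from this file): a daemon such as
 * pim6sd drives this API over a raw ICMPv6 socket, roughly:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	struct mif6ctl mc = { .mif6c_mifi = 0,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *
 * MRT6MSG_* upcalls from ip6mr_cache_report() then arrive as normal
 * reads on the same socket.
 */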
1572
1573int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1574{
1575	int ret;
1576	struct mif6ctl vif;
1577	struct mf6cctl mfc;
1578	mifi_t mifi;
1579	struct net *net = sock_net(sk);
1580	struct mr6_table *mrt;
1581
1582	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1583	if (mrt == NULL)
1584		return -ENOENT;
1585
1586	if (optname != MRT6_INIT) {
1587		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
1588			return -EACCES;
1589	}
1590
1591	switch (optname) {
1592	case MRT6_INIT:
1593		if (sk->sk_type != SOCK_RAW ||
1594		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1595			return -EOPNOTSUPP;
1596		if (optlen < sizeof(int))
1597			return -EINVAL;
1598
1599		return ip6mr_sk_init(mrt, sk);
1600
1601	case MRT6_DONE:
1602		return ip6mr_sk_done(sk);
1603
1604	case MRT6_ADD_MIF:
1605		if (optlen < sizeof(vif))
1606			return -EINVAL;
1607		if (copy_from_user(&vif, optval, sizeof(vif)))
1608			return -EFAULT;
1609		if (vif.mif6c_mifi >= MAXMIFS)
1610			return -ENFILE;
1611		rtnl_lock();
1612		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1613		rtnl_unlock();
1614		return ret;
1615
1616	case MRT6_DEL_MIF:
1617		if (optlen < sizeof(mifi_t))
1618			return -EINVAL;
1619		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1620			return -EFAULT;
1621		rtnl_lock();
1622		ret = mif6_delete(mrt, mifi, NULL);
1623		rtnl_unlock();
1624		return ret;
1625
1626	/*
1627	 *	Manipulate the forwarding caches. These live
1628	 *	in a sort of kernel/user symbiosis.
1629	 */
1630	case MRT6_ADD_MFC:
1631	case MRT6_DEL_MFC:
1632		if (optlen < sizeof(mfc))
1633			return -EINVAL;
1634		if (copy_from_user(&mfc, optval, sizeof(mfc)))
1635			return -EFAULT;
1636		rtnl_lock();
1637		if (optname == MRT6_DEL_MFC)
1638			ret = ip6mr_mfc_delete(mrt, &mfc);
1639		else
1640			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
1641		rtnl_unlock();
1642		return ret;
1643
1644	/*
 1645	 *	Control PIM assert (enabling PIM also enables assert)
1646	 */
1647	case MRT6_ASSERT:
1648	{
1649		int v;
1650		if (get_user(v, (int __user *)optval))
1651			return -EFAULT;
1652		mrt->mroute_do_assert = !!v;
1653		return 0;
1654	}
1655
1656#ifdef CONFIG_IPV6_PIMSM_V2
1657	case MRT6_PIM:
1658	{
1659		int v;
1660		if (get_user(v, (int __user *)optval))
1661			return -EFAULT;
1662		v = !!v;
1663		rtnl_lock();
1664		ret = 0;
1665		if (v != mrt->mroute_do_pim) {
1666			mrt->mroute_do_pim = v;
1667			mrt->mroute_do_assert = v;
1668		}
1669		rtnl_unlock();
1670		return ret;
1671	}
1672
1673#endif
1674#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1675	case MRT6_TABLE:
1676	{
1677		u32 v;
1678
1679		if (optlen != sizeof(u32))
1680			return -EINVAL;
1681		if (get_user(v, (u32 __user *)optval))
1682			return -EFAULT;
1683		if (sk == mrt->mroute6_sk)
1684			return -EBUSY;
1685
1686		rtnl_lock();
1687		ret = 0;
1688		if (!ip6mr_new_table(net, v))
1689			ret = -ENOMEM;
1690		raw6_sk(sk)->ip6mr_table = v;
1691		rtnl_unlock();
1692		return ret;
1693	}
1694#endif
1695	/*
1696	 *	Spurious command, or MRT6_VERSION which you cannot
1697	 *	set.
1698	 */
1699	default:
1700		return -ENOPROTOOPT;
1701	}
1702}
1703
1704/*
1705 *	Getsock opt support for the multicast routing system.
1706 */
1707
1708int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1709			  int __user *optlen)
1710{
1711	int olr;
1712	int val;
1713	struct net *net = sock_net(sk);
1714	struct mr6_table *mrt;
1715
1716	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1717	if (mrt == NULL)
1718		return -ENOENT;
1719
1720	switch (optname) {
1721	case MRT6_VERSION:
1722		val = 0x0305;
1723		break;
1724#ifdef CONFIG_IPV6_PIMSM_V2
1725	case MRT6_PIM:
1726		val = mrt->mroute_do_pim;
1727		break;
1728#endif
1729	case MRT6_ASSERT:
1730		val = mrt->mroute_do_assert;
1731		break;
1732	default:
1733		return -ENOPROTOOPT;
1734	}
1735
1736	if (get_user(olr, optlen))
1737		return -EFAULT;
1738
1739	olr = min_t(int, olr, sizeof(int));
1740	if (olr < 0)
1741		return -EINVAL;
1742
1743	if (put_user(olr, optlen))
1744		return -EFAULT;
1745	if (copy_to_user(optval, &val, olr))
1746		return -EFAULT;
1747	return 0;
1748}
1749
1750/*
1751 *	The IP multicast ioctl support routines.
1752 */
1753
1754int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1755{
1756	struct sioc_sg_req6 sr;
1757	struct sioc_mif_req6 vr;
1758	struct mif_device *vif;
1759	struct mfc6_cache *c;
1760	struct net *net = sock_net(sk);
1761	struct mr6_table *mrt;
1762
1763	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1764	if (mrt == NULL)
1765		return -ENOENT;
1766
1767	switch (cmd) {
1768	case SIOCGETMIFCNT_IN6:
1769		if (copy_from_user(&vr, arg, sizeof(vr)))
1770			return -EFAULT;
1771		if (vr.mifi >= mrt->maxvif)
1772			return -EINVAL;
1773		read_lock(&mrt_lock);
1774		vif = &mrt->vif6_table[vr.mifi];
1775		if (MIF_EXISTS(mrt, vr.mifi)) {
1776			vr.icount = vif->pkt_in;
1777			vr.ocount = vif->pkt_out;
1778			vr.ibytes = vif->bytes_in;
1779			vr.obytes = vif->bytes_out;
1780			read_unlock(&mrt_lock);
1781
1782			if (copy_to_user(arg, &vr, sizeof(vr)))
1783				return -EFAULT;
1784			return 0;
1785		}
1786		read_unlock(&mrt_lock);
1787		return -EADDRNOTAVAIL;
1788	case SIOCGETSGCNT_IN6:
1789		if (copy_from_user(&sr, arg, sizeof(sr)))
1790			return -EFAULT;
1791
1792		read_lock(&mrt_lock);
1793		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1794		if (c) {
1795			sr.pktcnt = c->mfc_un.res.pkt;
1796			sr.bytecnt = c->mfc_un.res.bytes;
1797			sr.wrong_if = c->mfc_un.res.wrong_if;
1798			read_unlock(&mrt_lock);
1799
1800			if (copy_to_user(arg, &sr, sizeof(sr)))
1801				return -EFAULT;
1802			return 0;
1803		}
1804		read_unlock(&mrt_lock);
1805		return -EADDRNOTAVAIL;
1806	default:
1807		return -ENOIOCTLCMD;
1808	}
1809}
1810
1811#ifdef CONFIG_COMPAT
1812struct compat_sioc_sg_req6 {
1813	struct sockaddr_in6 src;
1814	struct sockaddr_in6 grp;
1815	compat_ulong_t pktcnt;
1816	compat_ulong_t bytecnt;
1817	compat_ulong_t wrong_if;
1818};
1819
1820struct compat_sioc_mif_req6 {
1821	mifi_t	mifi;
1822	compat_ulong_t icount;
1823	compat_ulong_t ocount;
1824	compat_ulong_t ibytes;
1825	compat_ulong_t obytes;
1826};
1827
1828int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1829{
1830	struct compat_sioc_sg_req6 sr;
1831	struct compat_sioc_mif_req6 vr;
1832	struct mif_device *vif;
1833	struct mfc6_cache *c;
1834	struct net *net = sock_net(sk);
1835	struct mr6_table *mrt;
1836
1837	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1838	if (mrt == NULL)
1839		return -ENOENT;
1840
1841	switch (cmd) {
1842	case SIOCGETMIFCNT_IN6:
1843		if (copy_from_user(&vr, arg, sizeof(vr)))
1844			return -EFAULT;
1845		if (vr.mifi >= mrt->maxvif)
1846			return -EINVAL;
1847		read_lock(&mrt_lock);
1848		vif = &mrt->vif6_table[vr.mifi];
1849		if (MIF_EXISTS(mrt, vr.mifi)) {
1850			vr.icount = vif->pkt_in;
1851			vr.ocount = vif->pkt_out;
1852			vr.ibytes = vif->bytes_in;
1853			vr.obytes = vif->bytes_out;
1854			read_unlock(&mrt_lock);
1855
1856			if (copy_to_user(arg, &vr, sizeof(vr)))
1857				return -EFAULT;
1858			return 0;
1859		}
1860		read_unlock(&mrt_lock);
1861		return -EADDRNOTAVAIL;
1862	case SIOCGETSGCNT_IN6:
1863		if (copy_from_user(&sr, arg, sizeof(sr)))
1864			return -EFAULT;
1865
1866		read_lock(&mrt_lock);
1867		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1868		if (c) {
1869			sr.pktcnt = c->mfc_un.res.pkt;
1870			sr.bytecnt = c->mfc_un.res.bytes;
1871			sr.wrong_if = c->mfc_un.res.wrong_if;
1872			read_unlock(&mrt_lock);
1873
1874			if (copy_to_user(arg, &sr, sizeof(sr)))
1875				return -EFAULT;
1876			return 0;
1877		}
1878		read_unlock(&mrt_lock);
1879		return -EADDRNOTAVAIL;
1880	default:
1881		return -ENOIOCTLCMD;
1882	}
1883}
1884#endif
1885
1886static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1887{
1888	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1889			 IPSTATS_MIB_OUTFORWDATAGRAMS);
1890	return dst_output(skb);
1891}
1892
1893/*
1894 *	Processing handlers for ip6mr_forward
1895 */
1896
1897static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1898			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1899{
1900	struct ipv6hdr *ipv6h;
1901	struct mif_device *vif = &mrt->vif6_table[vifi];
1902	struct net_device *dev;
1903	struct dst_entry *dst;
1904	struct flowi6 fl6;
1905
1906	if (vif->dev == NULL)
1907		goto out_free;
1908
1909#ifdef CONFIG_IPV6_PIMSM_V2
1910	if (vif->flags & MIFF_REGISTER) {
1911		vif->pkt_out++;
1912		vif->bytes_out += skb->len;
1913		vif->dev->stats.tx_bytes += skb->len;
1914		vif->dev->stats.tx_packets++;
1915		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
1916		goto out_free;
1917	}
1918#endif
1919
1920	ipv6h = ipv6_hdr(skb);
1921
1922	fl6 = (struct flowi6) {
1923		.flowi6_oif = vif->link,
1924		.daddr = ipv6h->daddr,
1925	};
1926
1927	dst = ip6_route_output(net, NULL, &fl6);
1928	if (!dst)
1929		goto out_free;
1930
1931	skb_dst_drop(skb);
1932	skb_dst_set(skb, dst);
1933
 1934	/*
 1935	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
 1936	 * locally not only before forwarding, but also after forwarding on
 1937	 * all output interfaces. Clearly, if the mrouter runs a multicast
 1938	 * application, that application should receive packets regardless
 1939	 * of which interface it joined on.
 1940	 * If we did not do this, the program would have to join on all
 1941	 * interfaces. On the other hand, a multihomed host (or router, but
 1942	 * not an mrouter) must not join on more than one interface, as that
 1943	 * would result in receiving duplicate packets.
 1944	 */
1945	dev = vif->dev;
1946	skb->dev = dev;
1947	vif->pkt_out++;
1948	vif->bytes_out += skb->len;
1949
1950	/* We are about to write */
1951	/* XXX: extension headers? */
1952	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1953		goto out_free;
1954
1955	ipv6h = ipv6_hdr(skb);
1956	ipv6h->hop_limit--;
1957
1958	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1959
1960	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
1961		       ip6mr_forward2_finish);
1962
1963out_free:
1964	kfree_skb(skb);
1965	return 0;
1966}
1967
1968static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
1969{
1970	int ct;
1971
1972	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1973		if (mrt->vif6_table[ct].dev == dev)
1974			break;
1975	}
1976	return ct;
1977}
1978
1979static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
1980			  struct sk_buff *skb, struct mfc6_cache *cache)
1981{
1982	int psend = -1;
1983	int vif, ct;
1984
1985	vif = cache->mf6c_parent;
1986	cache->mfc_un.res.pkt++;
1987	cache->mfc_un.res.bytes += skb->len;
1988
1989	/*
1990	 * Wrong interface: drop packet and (maybe) send PIM assert.
1991	 */
1992	if (mrt->vif6_table[vif].dev != skb->dev) {
1993		int true_vifi;
1994
1995		cache->mfc_un.res.wrong_if++;
1996		true_vifi = ip6mr_find_vif(mrt, skb->dev);
1997
1998		if (true_vifi >= 0 && mrt->mroute_do_assert &&
 1999		    /* PIM-SM uses asserts when switching from the RPT to the
 2000		       SPT, so we cannot insist that the packet arrived on an
 2001		       oif. That is unfortunate, but the alternative would be
 2002		       moving a rather large chunk of pimd into the kernel. --ANK
 2003		     */
2004		    (mrt->mroute_do_pim ||
2005		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2006		    time_after(jiffies,
2007			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2008			cache->mfc_un.res.last_assert = jiffies;
2009			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2010		}
2011		goto dont_forward;
2012	}
2013
2014	mrt->vif6_table[vif].pkt_in++;
2015	mrt->vif6_table[vif].bytes_in += skb->len;
2016
2017	/*
2018	 *	Forward the frame
2019	 */
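	/* Clone a copy for every eligible output MIF except the last one,
	 * which consumes the original skb, so n outputs cost only n - 1
	 * clones.
	 */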
2020	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2021		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2022			if (psend != -1) {
2023				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2024				if (skb2)
2025					ip6mr_forward2(net, mrt, skb2, cache, psend);
2026			}
2027			psend = ct;
2028		}
2029	}
2030	if (psend != -1) {
2031		ip6mr_forward2(net, mrt, skb, cache, psend);
2032		return 0;
2033	}
2034
2035dont_forward:
2036	kfree_skb(skb);
2037	return 0;
2038}
2039
2040
2041/*
2042 *	Multicast packets for forwarding arrive here
2043 */
2044
2045int ip6_mr_input(struct sk_buff *skb)
2046{
2047	struct mfc6_cache *cache;
2048	struct net *net = dev_net(skb->dev);
2049	struct mr6_table *mrt;
2050	struct flowi6 fl6 = {
2051		.flowi6_iif	= skb->dev->ifindex,
2052		.flowi6_mark	= skb->mark,
2053	};
2054	int err;
2055
2056	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2057	if (err < 0) {
2058		kfree_skb(skb);
2059		return err;
2060	}
2061
2062	read_lock(&mrt_lock);
2063	cache = ip6mr_cache_find(mrt,
2064				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2065
2066	/*
2067	 *	No usable cache entry
2068	 */
2069	if (cache == NULL) {
2070		int vif;
2071
2072		vif = ip6mr_find_vif(mrt, skb->dev);
2073		if (vif >= 0) {
2074			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2075			read_unlock(&mrt_lock);
2076
2077			return err;
2078		}
2079		read_unlock(&mrt_lock);
2080		kfree_skb(skb);
2081		return -ENODEV;
2082	}
2083
2084	ip6_mr_forward(net, mrt, skb, cache);
2085
2086	read_unlock(&mrt_lock);
2087
2088	return 0;
2089}
2090
2091
2092static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2093			       struct mfc6_cache *c, struct rtmsg *rtm)
2094{
2095	int ct;
2096	struct rtnexthop *nhp;
2097	u8 *b = skb_tail_pointer(skb);
2098	struct rtattr *mp_head;
2099
2100	/* If cache is unresolved, don't try to parse IIF and OIF */
2101	if (c->mf6c_parent >= MAXMIFS)
2102		return -ENOENT;
2103
2104	if (MIF_EXISTS(mrt, c->mf6c_parent))
2105		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
2106
2107	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2108
2109	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2110		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2111			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2112				goto rtattr_failure;
2113			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2114			nhp->rtnh_flags = 0;
2115			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2116			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2117			nhp->rtnh_len = sizeof(*nhp);
2118		}
2119	}
2120	mp_head->rta_type = RTA_MULTIPATH;
2121	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
2122	rtm->rtm_type = RTN_MULTICAST;
2123	return 1;
2124
2125rtattr_failure:
2126	nlmsg_trim(skb, b);
2127	return -EMSGSIZE;
2128}
2129
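/*
 *	Report a multicast route to an RTM_GETROUTE caller.  If no cache
 *	entry exists yet, a dummy IPv6 header holding only the addresses is
 *	queued as an unresolved packet (unless the caller passed 'nowait').
 */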
2130int ip6mr_get_route(struct net *net,
2131		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
2132{
2133	int err;
2134	struct mr6_table *mrt;
2135	struct mfc6_cache *cache;
2136	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2137
2138	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2139	if (mrt == NULL)
2140		return -ENOENT;
2141
2142	read_lock(&mrt_lock);
2143	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2144
2145	if (!cache) {
2146		struct sk_buff *skb2;
2147		struct ipv6hdr *iph;
2148		struct net_device *dev;
2149		int vif;
2150
2151		if (nowait) {
2152			read_unlock(&mrt_lock);
2153			return -EAGAIN;
2154		}
2155
2156		dev = skb->dev;
2157		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2158			read_unlock(&mrt_lock);
2159			return -ENODEV;
2160		}
2161
2162		/* really correct? */
2163		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2164		if (!skb2) {
2165			read_unlock(&mrt_lock);
2166			return -ENOMEM;
2167		}
2168
2169		skb_reset_transport_header(skb2);
2170
2171		skb_put(skb2, sizeof(struct ipv6hdr));
2172		skb_reset_network_header(skb2);
2173
2174		iph = ipv6_hdr(skb2);
2175		iph->version = 0;
2176		iph->priority = 0;
2177		iph->flow_lbl[0] = 0;
2178		iph->flow_lbl[1] = 0;
2179		iph->flow_lbl[2] = 0;
2180		iph->payload_len = 0;
2181		iph->nexthdr = IPPROTO_NONE;
2182		iph->hop_limit = 0;
2183		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
2184		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
2185
2186		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2187		read_unlock(&mrt_lock);
2188
2189		return err;
2190	}
2191
2192	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2193		cache->mfc_flags |= MFC_NOTIFY;
2194
2195	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2196	read_unlock(&mrt_lock);
2197	return err;
2198}
2199
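/*
 *	Build one complete RTM_NEWROUTE dump message for a cache entry,
 *	delegating the IIF/OIF attributes to __ip6mr_fill_mroute() above.
 */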
2200static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2201			     u32 pid, u32 seq, struct mfc6_cache *c)
2202{
2203	struct nlmsghdr *nlh;
2204	struct rtmsg *rtm;
2205
2206	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2207	if (nlh == NULL)
2208		return -EMSGSIZE;
2209
2210	rtm = nlmsg_data(nlh);
2211	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2212	rtm->rtm_dst_len  = 128;
2213	rtm->rtm_src_len  = 128;
2214	rtm->rtm_tos      = 0;
2215	rtm->rtm_table    = mrt->id;
2216	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
2217	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2218	rtm->rtm_protocol = RTPROT_UNSPEC;
2219	rtm->rtm_flags    = 0;
2220
2221	NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
2222	NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
2223
2224	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2225		goto nla_put_failure;
2226
2227	return nlmsg_end(skb, nlh);
2228
2229nla_put_failure:
2230	nlmsg_cancel(skb, nlh);
2231	return -EMSGSIZE;
2232}
2233
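/*
 *	Netlink dump callback: cb->args[] stores (table, hash bucket, entry)
 *	so an interrupted dump resumes exactly where it left off.
 */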
2234static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2235{
2236	struct net *net = sock_net(skb->sk);
2237	struct mr6_table *mrt;
2238	struct mfc6_cache *mfc;
2239	unsigned int t = 0, s_t;
2240	unsigned int h = 0, s_h;
2241	unsigned int e = 0, s_e;
2242
2243	s_t = cb->args[0];
2244	s_h = cb->args[1];
2245	s_e = cb->args[2];
2246
2247	read_lock(&mrt_lock);
2248	ip6mr_for_each_table(mrt, net) {
2249		if (t < s_t)
2250			goto next_table;
2251		if (t > s_t)
2252			s_h = 0;
2253		for (h = s_h; h < MFC6_LINES; h++) {
2254			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2255				if (e < s_e)
2256					goto next_entry;
2257				if (ip6mr_fill_mroute(mrt, skb,
2258						      NETLINK_CB(cb->skb).pid,
2259						      cb->nlh->nlmsg_seq,
2260						      mfc) < 0)
2261					goto done;
2262next_entry:
2263				e++;
2264			}
2265			e = s_e = 0;
2266		}
2267		s_h = 0;
2268next_table:
2269		t++;
2270	}
2271done:
2272	read_unlock(&mrt_lock);
2273
2274	cb->args[2] = e;
2275	cb->args[1] = h;
2276	cb->args[0] = t;
2277
2278	return skb->len;
2279}
v3.15
   1/*
   2 *	Linux IPv6 multicast routing support for BSD pim6sd
   3 *	Based on net/ipv4/ipmr.c.
   4 *
   5 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
   6 *		LSIIT Laboratory, Strasbourg, France
   7 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
   8 *		6WIND, Paris, France
   9 *	Copyright (C)2007,2008 USAGI/WIDE Project
  10 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  11 *
  12 *	This program is free software; you can redistribute it and/or
  13 *	modify it under the terms of the GNU General Public License
  14 *	as published by the Free Software Foundation; either version
  15 *	2 of the License, or (at your option) any later version.
  16 *
  17 */
  18
  19#include <asm/uaccess.h>
  20#include <linux/types.h>
  21#include <linux/sched.h>
  22#include <linux/errno.h>
  23#include <linux/timer.h>
  24#include <linux/mm.h>
  25#include <linux/kernel.h>
  26#include <linux/fcntl.h>
  27#include <linux/stat.h>
  28#include <linux/socket.h>
  29#include <linux/inet.h>
  30#include <linux/netdevice.h>
  31#include <linux/inetdevice.h>
  32#include <linux/proc_fs.h>
  33#include <linux/seq_file.h>
  34#include <linux/init.h>
  35#include <linux/slab.h>
  36#include <linux/compat.h>
  37#include <net/protocol.h>
  38#include <linux/skbuff.h>
  39#include <net/sock.h>
  40#include <net/raw.h>
  41#include <linux/notifier.h>
  42#include <linux/if_arp.h>
  43#include <net/checksum.h>
  44#include <net/netlink.h>
  45#include <net/fib_rules.h>
  46
  47#include <net/ipv6.h>
  48#include <net/ip6_route.h>
  49#include <linux/mroute6.h>
  50#include <linux/pim.h>
  51#include <net/addrconf.h>
  52#include <linux/netfilter_ipv6.h>
  53#include <linux/export.h>
  54#include <net/ip6_checksum.h>
  55#include <linux/netconf.h>
  56
  57struct mr6_table {
  58	struct list_head	list;
  59#ifdef CONFIG_NET_NS
  60	struct net		*net;
  61#endif
  62	u32			id;
  63	struct sock		*mroute6_sk;
  64	struct timer_list	ipmr_expire_timer;
  65	struct list_head	mfc6_unres_queue;
  66	struct list_head	mfc6_cache_array[MFC6_LINES];
  67	struct mif_device	vif6_table[MAXMIFS];
  68	int			maxvif;
  69	atomic_t		cache_resolve_queue_len;
  70	bool			mroute_do_assert;
  71	bool			mroute_do_pim;
  72#ifdef CONFIG_IPV6_PIMSM_V2
  73	int			mroute_reg_vif_num;
  74#endif
  75};
  76
  77struct ip6mr_rule {
  78	struct fib_rule		common;
  79};
  80
  81struct ip6mr_result {
  82	struct mr6_table	*mrt;
  83};
  84
  85/* Big lock, protecting vif table, mrt cache and mroute socket state.
  86   Note that the changes are semaphored via rtnl_lock.
  87 */
  88
  89static DEFINE_RWLOCK(mrt_lock);
  90
  91/*
  92 *	Multicast router control variables
  93 */
  94
  95#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
  96
  97/* Special spinlock for queue of unresolved entries */
  98static DEFINE_SPINLOCK(mfc_unres_lock);
  99
 100/* We return to original Alan's scheme. Hash table of resolved
 101   entries is changed only in process context and protected
 102   with weak lock mrt_lock. Queue of unresolved entries is protected
 103   with strong spinlock mfc_unres_lock.
 104
 105   In this case data path is free of exclusive locks at all.
 106 */
 107
 108static struct kmem_cache *mrt_cachep __read_mostly;
 109
 110static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
 111static void ip6mr_free_table(struct mr6_table *mrt);
 112
 113static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 114			   struct sk_buff *skb, struct mfc6_cache *cache);
 115static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
 116			      mifi_t mifi, int assert);
 117static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 118			       struct mfc6_cache *c, struct rtmsg *rtm);
 119static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 120			      int cmd);
 121static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 122			       struct netlink_callback *cb);
 123static void mroute_clean_tables(struct mr6_table *mrt);
 124static void ipmr_expire_process(unsigned long arg);
 125
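/*
 *	With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the table for a packet is
 *	chosen by the fib-rules engine; without it there is a single default
 *	table and the lookup helpers collapse to trivial accessors.
 */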
 126#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 127#define ip6mr_for_each_table(mrt, net) \
 128	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 129
 130static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 131{
 132	struct mr6_table *mrt;
 133
 134	ip6mr_for_each_table(mrt, net) {
 135		if (mrt->id == id)
 136			return mrt;
 137	}
 138	return NULL;
 139}
 140
 141static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 142			    struct mr6_table **mrt)
 143{
 144	int err;
 145	struct ip6mr_result res;
 146	struct fib_lookup_arg arg = {
 147		.result = &res,
 148		.flags = FIB_LOOKUP_NOREF,
 149	};
 150
 151	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 152			       flowi6_to_flowi(flp6), 0, &arg);
 153	if (err < 0)
 154		return err;
 155	*mrt = res.mrt;
 156	return 0;
 157}
 158
 159static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 160			     int flags, struct fib_lookup_arg *arg)
 161{
 162	struct ip6mr_result *res = arg->result;
 163	struct mr6_table *mrt;
 164
 165	switch (rule->action) {
 166	case FR_ACT_TO_TBL:
 167		break;
 168	case FR_ACT_UNREACHABLE:
 169		return -ENETUNREACH;
 170	case FR_ACT_PROHIBIT:
 171		return -EACCES;
 172	case FR_ACT_BLACKHOLE:
 173	default:
 174		return -EINVAL;
 175	}
 176
 177	mrt = ip6mr_get_table(rule->fr_net, rule->table);
 178	if (mrt == NULL)
 179		return -EAGAIN;
 180	res->mrt = mrt;
 181	return 0;
 182}
 183
 184static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
 185{
 186	return 1;
 187}
 188
 189static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
 190	FRA_GENERIC_POLICY,
 191};
 192
 193static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 194				struct fib_rule_hdr *frh, struct nlattr **tb)
 195{
 196	return 0;
 197}
 198
 199static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 200			      struct nlattr **tb)
 201{
 202	return 1;
 203}
 204
 205static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 206			   struct fib_rule_hdr *frh)
 207{
 208	frh->dst_len = 0;
 209	frh->src_len = 0;
 210	frh->tos     = 0;
 211	return 0;
 212}
 213
 214static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
 215	.family		= RTNL_FAMILY_IP6MR,
 216	.rule_size	= sizeof(struct ip6mr_rule),
 217	.addr_size	= sizeof(struct in6_addr),
 218	.action		= ip6mr_rule_action,
 219	.match		= ip6mr_rule_match,
 220	.configure	= ip6mr_rule_configure,
 221	.compare	= ip6mr_rule_compare,
 222	.default_pref	= fib_default_rule_pref,
 223	.fill		= ip6mr_rule_fill,
 224	.nlgroup	= RTNLGRP_IPV6_RULE,
 225	.policy		= ip6mr_rule_policy,
 226	.owner		= THIS_MODULE,
 227};
 228
 229static int __net_init ip6mr_rules_init(struct net *net)
 230{
 231	struct fib_rules_ops *ops;
 232	struct mr6_table *mrt;
 233	int err;
 234
 235	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
 236	if (IS_ERR(ops))
 237		return PTR_ERR(ops);
 238
 239	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
 240
 241	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
 242	if (mrt == NULL) {
 243		err = -ENOMEM;
 244		goto err1;
 245	}
 246
 247	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
 248	if (err < 0)
 249		goto err2;
 250
 251	net->ipv6.mr6_rules_ops = ops;
 252	return 0;
 253
 254err2:
 255	kfree(mrt);
 256err1:
 257	fib_rules_unregister(ops);
 258	return err;
 259}
 260
 261static void __net_exit ip6mr_rules_exit(struct net *net)
 262{
 263	struct mr6_table *mrt, *next;
 264
 265	rtnl_lock();
 266	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 267		list_del(&mrt->list);
 268		ip6mr_free_table(mrt);
 269	}
 270	rtnl_unlock();
 271	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 272}
 273#else
 274#define ip6mr_for_each_table(mrt, net) \
 275	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 276
 277static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 278{
 279	return net->ipv6.mrt6;
 280}
 281
 282static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 283			    struct mr6_table **mrt)
 284{
 285	*mrt = net->ipv6.mrt6;
 286	return 0;
 287}
 288
 289static int __net_init ip6mr_rules_init(struct net *net)
 290{
 291	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
 292	return net->ipv6.mrt6 ? 0 : -ENOMEM;
 293}
 294
 295static void __net_exit ip6mr_rules_exit(struct net *net)
 296{
 297	rtnl_lock();
 298	ip6mr_free_table(net->ipv6.mrt6);
 299	net->ipv6.mrt6 = NULL;
 300	rtnl_unlock();
 301}
 302#endif
 303
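/*
 *	Look up an mr6_table by id, creating it on demand: initialise the
 *	resolved hash buckets, the unresolved queue and its expiry timer.
 */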
 304static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 305{
 306	struct mr6_table *mrt;
 307	unsigned int i;
 308
 309	mrt = ip6mr_get_table(net, id);
 310	if (mrt != NULL)
 311		return mrt;
 312
 313	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 314	if (mrt == NULL)
 315		return NULL;
 316	mrt->id = id;
 317	write_pnet(&mrt->net, net);
 318
 319	/* Forwarding cache */
 320	for (i = 0; i < MFC6_LINES; i++)
 321		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
 322
 323	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 324
 325	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
 326		    (unsigned long)mrt);
 327
 328#ifdef CONFIG_IPV6_PIMSM_V2
 329	mrt->mroute_reg_vif_num = -1;
 330#endif
 331#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 332	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
 333#endif
 334	return mrt;
 335}
 336
 337static void ip6mr_free_table(struct mr6_table *mrt)
 338{
 339	del_timer(&mrt->ipmr_expire_timer);
 340	mroute_clean_tables(mrt);
 341	kfree(mrt);
 342}
 343
 344#ifdef CONFIG_PROC_FS
 345
 346struct ipmr_mfc_iter {
 347	struct seq_net_private p;
 348	struct mr6_table *mrt;
 349	struct list_head *cache;
 350	int ct;
 351};
 352
 353
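/*
 *	/proc seq iterator for the MFC: walks the resolved hash buckets under
 *	mrt_lock, then the unresolved queue under mfc_unres_lock; the stop
 *	handler below drops whichever lock is still held.
 */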
 354static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 355					   struct ipmr_mfc_iter *it, loff_t pos)
 356{
 357	struct mr6_table *mrt = it->mrt;
 358	struct mfc6_cache *mfc;
 359
 360	read_lock(&mrt_lock);
 361	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
 362		it->cache = &mrt->mfc6_cache_array[it->ct];
 363		list_for_each_entry(mfc, it->cache, list)
 364			if (pos-- == 0)
 365				return mfc;
 366	}
 367	read_unlock(&mrt_lock);
 368
 369	spin_lock_bh(&mfc_unres_lock);
 370	it->cache = &mrt->mfc6_unres_queue;
 371	list_for_each_entry(mfc, it->cache, list)
 372		if (pos-- == 0)
 373			return mfc;
 374	spin_unlock_bh(&mfc_unres_lock);
 375
 376	it->cache = NULL;
 377	return NULL;
 378}
 379
 380/*
 381 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 382 */
 383
 384struct ipmr_vif_iter {
 385	struct seq_net_private p;
 386	struct mr6_table *mrt;
 387	int ct;
 388};
 389
 390static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
 391					    struct ipmr_vif_iter *iter,
 392					    loff_t pos)
 393{
 394	struct mr6_table *mrt = iter->mrt;
 395
 396	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
 397		if (!MIF_EXISTS(mrt, iter->ct))
 398			continue;
 399		if (pos-- == 0)
 400			return &mrt->vif6_table[iter->ct];
 401	}
 402	return NULL;
 403}
 404
 405static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 406	__acquires(mrt_lock)
 407{
 408	struct ipmr_vif_iter *iter = seq->private;
 409	struct net *net = seq_file_net(seq);
 410	struct mr6_table *mrt;
 411
 412	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 413	if (mrt == NULL)
 414		return ERR_PTR(-ENOENT);
 415
 416	iter->mrt = mrt;
 417
 418	read_lock(&mrt_lock);
 419	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
 420		: SEQ_START_TOKEN;
 421}
 422
 423static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 424{
 425	struct ipmr_vif_iter *iter = seq->private;
 426	struct net *net = seq_file_net(seq);
 427	struct mr6_table *mrt = iter->mrt;
 428
 429	++*pos;
 430	if (v == SEQ_START_TOKEN)
 431		return ip6mr_vif_seq_idx(net, iter, 0);
 432
 433	while (++iter->ct < mrt->maxvif) {
 434		if (!MIF_EXISTS(mrt, iter->ct))
 435			continue;
 436		return &mrt->vif6_table[iter->ct];
 437	}
 438	return NULL;
 439}
 440
 441static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 442	__releases(mrt_lock)
 443{
 444	read_unlock(&mrt_lock);
 445}
 446
 447static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 448{
 449	struct ipmr_vif_iter *iter = seq->private;
 450	struct mr6_table *mrt = iter->mrt;
 451
 452	if (v == SEQ_START_TOKEN) {
 453		seq_puts(seq,
 454			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
 455	} else {
 456		const struct mif_device *vif = v;
 457		const char *name = vif->dev ? vif->dev->name : "none";
 458
 459		seq_printf(seq,
 460			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
 461			   vif - mrt->vif6_table,
 462			   name, vif->bytes_in, vif->pkt_in,
 463			   vif->bytes_out, vif->pkt_out,
 464			   vif->flags);
 465	}
 466	return 0;
 467}
 468
 469static const struct seq_operations ip6mr_vif_seq_ops = {
 470	.start = ip6mr_vif_seq_start,
 471	.next  = ip6mr_vif_seq_next,
 472	.stop  = ip6mr_vif_seq_stop,
 473	.show  = ip6mr_vif_seq_show,
 474};
 475
 476static int ip6mr_vif_open(struct inode *inode, struct file *file)
 477{
 478	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
 479			    sizeof(struct ipmr_vif_iter));
 480}
 481
 482static const struct file_operations ip6mr_vif_fops = {
 483	.owner	 = THIS_MODULE,
 484	.open    = ip6mr_vif_open,
 485	.read    = seq_read,
 486	.llseek  = seq_lseek,
 487	.release = seq_release_net,
 488};
 489
 490static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 491{
 492	struct ipmr_mfc_iter *it = seq->private;
 493	struct net *net = seq_file_net(seq);
 494	struct mr6_table *mrt;
 495
 496	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
 497	if (mrt == NULL)
 498		return ERR_PTR(-ENOENT);
 499
 500	it->mrt = mrt;
 501	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
 502		: SEQ_START_TOKEN;
 503}
 504
 505static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 506{
 507	struct mfc6_cache *mfc = v;
 508	struct ipmr_mfc_iter *it = seq->private;
 509	struct net *net = seq_file_net(seq);
 510	struct mr6_table *mrt = it->mrt;
 511
 512	++*pos;
 513
 514	if (v == SEQ_START_TOKEN)
 515		return ipmr_mfc_seq_idx(net, seq->private, 0);
 516
 517	if (mfc->list.next != it->cache)
 518		return list_entry(mfc->list.next, struct mfc6_cache, list);
 519
 520	if (it->cache == &mrt->mfc6_unres_queue)
 521		goto end_of_list;
 522
 523	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
 524
 525	while (++it->ct < MFC6_LINES) {
 526		it->cache = &mrt->mfc6_cache_array[it->ct];
 527		if (list_empty(it->cache))
 528			continue;
 529		return list_first_entry(it->cache, struct mfc6_cache, list);
 530	}
 531
 532	/* exhausted cache_array, show unresolved */
 533	read_unlock(&mrt_lock);
 534	it->cache = &mrt->mfc6_unres_queue;
 535	it->ct = 0;
 536
 537	spin_lock_bh(&mfc_unres_lock);
 538	if (!list_empty(it->cache))
 539		return list_first_entry(it->cache, struct mfc6_cache, list);
 540
 541 end_of_list:
 542	spin_unlock_bh(&mfc_unres_lock);
 543	it->cache = NULL;
 544
 545	return NULL;
 546}
 547
 548static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 549{
 550	struct ipmr_mfc_iter *it = seq->private;
 551	struct mr6_table *mrt = it->mrt;
 552
 553	if (it->cache == &mrt->mfc6_unres_queue)
 554		spin_unlock_bh(&mfc_unres_lock);
 555	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
 556		read_unlock(&mrt_lock);
 557}
 558
 559static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 560{
 561	int n;
 562
 563	if (v == SEQ_START_TOKEN) {
 564		seq_puts(seq,
 565			 "Group                            "
 566			 "Origin                           "
 567			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
 568	} else {
 569		const struct mfc6_cache *mfc = v;
 570		const struct ipmr_mfc_iter *it = seq->private;
 571		struct mr6_table *mrt = it->mrt;
 572
 573		seq_printf(seq, "%pI6 %pI6 %-3hd",
 574			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 575			   mfc->mf6c_parent);
 576
 577		if (it->cache != &mrt->mfc6_unres_queue) {
 578			seq_printf(seq, " %8lu %8lu %8lu",
 579				   mfc->mfc_un.res.pkt,
 580				   mfc->mfc_un.res.bytes,
 581				   mfc->mfc_un.res.wrong_if);
 582			for (n = mfc->mfc_un.res.minvif;
 583			     n < mfc->mfc_un.res.maxvif; n++) {
 584				if (MIF_EXISTS(mrt, n) &&
 585				    mfc->mfc_un.res.ttls[n] < 255)
 586					seq_printf(seq,
 587						   " %2d:%-3d",
 588						   n, mfc->mfc_un.res.ttls[n]);
 589			}
 590		} else {
 591			/* unresolved mfc_caches don't contain
 592			 * pkt, bytes and wrong_if values
 593			 */
 594			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
 595		}
 596		seq_putc(seq, '\n');
 597	}
 598	return 0;
 599}
 600
 601static const struct seq_operations ipmr_mfc_seq_ops = {
 602	.start = ipmr_mfc_seq_start,
 603	.next  = ipmr_mfc_seq_next,
 604	.stop  = ipmr_mfc_seq_stop,
 605	.show  = ipmr_mfc_seq_show,
 606};
 607
 608static int ipmr_mfc_open(struct inode *inode, struct file *file)
 609{
 610	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
 611			    sizeof(struct ipmr_mfc_iter));
 612}
 613
 614static const struct file_operations ip6mr_mfc_fops = {
 615	.owner	 = THIS_MODULE,
 616	.open    = ipmr_mfc_open,
 617	.read    = seq_read,
 618	.llseek  = seq_lseek,
 619	.release = seq_release_net,
 620};
 621#endif
 622
 623#ifdef CONFIG_IPV6_PIMSM_V2
 624
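/*
 *	Receive a PIM-SM REGISTER: validate the header and checksum, check
 *	that the encapsulated packet is really multicast, then re-inject it
 *	through the pim6reg device so it is routed as if it arrived there.
 */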
 625static int pim6_rcv(struct sk_buff *skb)
 626{
 627	struct pimreghdr *pim;
 628	struct ipv6hdr   *encap;
 629	struct net_device  *reg_dev = NULL;
 630	struct net *net = dev_net(skb->dev);
 631	struct mr6_table *mrt;
 632	struct flowi6 fl6 = {
 633		.flowi6_iif	= skb->dev->ifindex,
 634		.flowi6_mark	= skb->mark,
 635	};
 636	int reg_vif_num;
 637
 638	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 639		goto drop;
 640
 641	pim = (struct pimreghdr *)skb_transport_header(skb);
 642	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
 643	    (pim->flags & PIM_NULL_REGISTER) ||
 644	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 645			     sizeof(*pim), IPPROTO_PIM,
 646			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 647	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 648		goto drop;
 649
 650	/* check if the inner packet is destined to mcast group */
 651	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
 652				   sizeof(*pim));
 653
 654	if (!ipv6_addr_is_multicast(&encap->daddr) ||
 655	    encap->payload_len == 0 ||
 656	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
 657		goto drop;
 658
 659	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 660		goto drop;
 661	reg_vif_num = mrt->mroute_reg_vif_num;
 662
 663	read_lock(&mrt_lock);
 664	if (reg_vif_num >= 0)
 665		reg_dev = mrt->vif6_table[reg_vif_num].dev;
 666	if (reg_dev)
 667		dev_hold(reg_dev);
 668	read_unlock(&mrt_lock);
 669
 670	if (reg_dev == NULL)
 671		goto drop;
 672
 673	skb->mac_header = skb->network_header;
 674	skb_pull(skb, (u8 *)encap - skb->data);
 675	skb_reset_network_header(skb);
 676	skb->protocol = htons(ETH_P_IPV6);
 677	skb->ip_summed = CHECKSUM_NONE;
 678
 679	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
 680
 681	netif_rx(skb);
 682
 683	dev_put(reg_dev);
 684	return 0;
 685 drop:
 686	kfree_skb(skb);
 687	return 0;
 688}
 689
 690static const struct inet6_protocol pim6_protocol = {
 691	.handler	=	pim6_rcv,
 692};
 693
 694/* Service routines creating virtual interfaces: PIMREG */
 695
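/*
 *	Transmitting on the register vif sends nothing on the wire: the whole
 *	packet is bounced to the daemon as an MRT6MSG_WHOLEPKT message so
 *	that pim6sd can wrap it in a PIM REGISTER.
 */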
 696static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 697				      struct net_device *dev)
 698{
 699	struct net *net = dev_net(dev);
 700	struct mr6_table *mrt;
 701	struct flowi6 fl6 = {
 702		.flowi6_oif	= dev->ifindex,
 703		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 704		.flowi6_mark	= skb->mark,
 705	};
 706	int err;
 707
 708	err = ip6mr_fib_lookup(net, &fl6, &mrt);
 709	if (err < 0) {
 710		kfree_skb(skb);
 711		return err;
 712	}
 713
 714	read_lock(&mrt_lock);
 715	dev->stats.tx_bytes += skb->len;
 716	dev->stats.tx_packets++;
 717	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
 718	read_unlock(&mrt_lock);
 719	kfree_skb(skb);
 720	return NETDEV_TX_OK;
 721}
 722
 723static const struct net_device_ops reg_vif_netdev_ops = {
 724	.ndo_start_xmit	= reg_vif_xmit,
 725};
 726
 727static void reg_vif_setup(struct net_device *dev)
 728{
 729	dev->type		= ARPHRD_PIMREG;
 730	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 731	dev->flags		= IFF_NOARP;
 732	dev->netdev_ops		= &reg_vif_netdev_ops;
 733	dev->destructor		= free_netdev;
 734	dev->features		|= NETIF_F_NETNS_LOCAL;
 735}
 736
 737static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 738{
 739	struct net_device *dev;
 740	char name[IFNAMSIZ];
 741
 742	if (mrt->id == RT6_TABLE_DFLT)
 743		sprintf(name, "pim6reg");
 744	else
 745		sprintf(name, "pim6reg%u", mrt->id);
 746
 747	dev = alloc_netdev(0, name, reg_vif_setup);
 748	if (dev == NULL)
 749		return NULL;
 750
 751	dev_net_set(dev, net);
 752
 753	if (register_netdevice(dev)) {
 754		free_netdev(dev);
 755		return NULL;
 756	}
 757	dev->iflink = 0;
 758
 759	if (dev_open(dev))
 760		goto failure;
 761
 762	dev_hold(dev);
 763	return dev;
 764
 765failure:
 766	/* allow the register to be completed before unregistering. */
 767	rtnl_unlock();
 768	rtnl_lock();
 769
 770	unregister_netdevice(dev);
 771	return NULL;
 772}
 773#endif
 774
 775/*
 776 *	Delete a VIF entry
 777 */
 778
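/*
 *	Clears the device pointer under mrt_lock, shrinks maxvif when the
 *	highest mif goes away, drops allmulti and the mc_forwarding count,
 *	and queues a register device for unregistration on 'head'.
 */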
 779static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 780{
 781	struct mif_device *v;
 782	struct net_device *dev;
 783	struct inet6_dev *in6_dev;
 784
 785	if (vifi < 0 || vifi >= mrt->maxvif)
 786		return -EADDRNOTAVAIL;
 787
 788	v = &mrt->vif6_table[vifi];
 789
 790	write_lock_bh(&mrt_lock);
 791	dev = v->dev;
 792	v->dev = NULL;
 793
 794	if (!dev) {
 795		write_unlock_bh(&mrt_lock);
 796		return -EADDRNOTAVAIL;
 797	}
 798
 799#ifdef CONFIG_IPV6_PIMSM_V2
 800	if (vifi == mrt->mroute_reg_vif_num)
 801		mrt->mroute_reg_vif_num = -1;
 802#endif
 803
 804	if (vifi + 1 == mrt->maxvif) {
 805		int tmp;
 806		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 807			if (MIF_EXISTS(mrt, tmp))
 808				break;
 809		}
 810		mrt->maxvif = tmp + 1;
 811	}
 812
 813	write_unlock_bh(&mrt_lock);
 814
 815	dev_set_allmulti(dev, -1);
 816
 817	in6_dev = __in6_dev_get(dev);
 818	if (in6_dev) {
 819		in6_dev->cnf.mc_forwarding--;
 820		inet6_netconf_notify_devconf(dev_net(dev),
 821					     NETCONFA_MC_FORWARDING,
 822					     dev->ifindex, &in6_dev->cnf);
 823	}
 824
 825	if (v->flags & MIFF_REGISTER)
 826		unregister_netdevice_queue(dev, head);
 827
 828	dev_put(dev);
 829	return 0;
 830}
 831
 832static inline void ip6mr_cache_free(struct mfc6_cache *c)
 833{
 834	kmem_cache_free(mrt_cachep, c);
 835}
 836
 837/* Destroy an unresolved cache entry, killing queued skbs
 838   and reporting error to netlink readers.
 839 */
 840
 841static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
 842{
 843	struct net *net = read_pnet(&mrt->net);
 844	struct sk_buff *skb;
 845
 846	atomic_dec(&mrt->cache_resolve_queue_len);
 847
 848	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 849		if (ipv6_hdr(skb)->version == 0) {
 850			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 851			nlh->nlmsg_type = NLMSG_ERROR;
 852			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
 853			skb_trim(skb, nlh->nlmsg_len);
 854			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
 855			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 856		} else
 857			kfree_skb(skb);
 858	}
 859
 860	ip6mr_cache_free(c);
 861}
 862
 863
 864/* Timer process for all the unresolved queue. */
 865
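/*
 *	Entries that stayed unresolved too long are destroyed (dropping their
 *	queued packets); the timer is re-armed for the earliest remaining
 *	expiry.
 */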
 866static void ipmr_do_expire_process(struct mr6_table *mrt)
 867{
 868	unsigned long now = jiffies;
 869	unsigned long expires = 10 * HZ;
 870	struct mfc6_cache *c, *next;
 871
 872	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
 873		if (time_after(c->mfc_un.unres.expires, now)) {
 874			/* not yet... */
 875			unsigned long interval = c->mfc_un.unres.expires - now;
 876			if (interval < expires)
 877				expires = interval;
 878			continue;
 879		}
 880
 881		list_del(&c->list);
 882		mr6_netlink_event(mrt, c, RTM_DELROUTE);
 883		ip6mr_destroy_unres(mrt, c);
 884	}
 885
 886	if (!list_empty(&mrt->mfc6_unres_queue))
 887		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 888}
 889
 890static void ipmr_expire_process(unsigned long arg)
 891{
 892	struct mr6_table *mrt = (struct mr6_table *)arg;
 893
 894	if (!spin_trylock(&mfc_unres_lock)) {
 895		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 896		return;
 897	}
 898
 899	if (!list_empty(&mrt->mfc6_unres_queue))
 900		ipmr_do_expire_process(mrt);
 901
 902	spin_unlock(&mfc_unres_lock);
 903}
 904
 905/* Fill the oif list. Called with mrt_lock held for writing. */
 906
 907static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
 908				    unsigned char *ttls)
 909{
 910	int vifi;
 911
 912	cache->mfc_un.res.minvif = MAXMIFS;
 913	cache->mfc_un.res.maxvif = 0;
 914	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 915
 916	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
 917		if (MIF_EXISTS(mrt, vifi) &&
 918		    ttls[vifi] && ttls[vifi] < 255) {
 919			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 920			if (cache->mfc_un.res.minvif > vifi)
 921				cache->mfc_un.res.minvif = vifi;
 922			if (cache->mfc_un.res.maxvif <= vifi)
 923				cache->mfc_un.res.maxvif = vifi + 1;
 924		}
 925	}
 926}
 927
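/*
 *	Add a mif: bind an ordinary device looked up by ifindex or, for
 *	PIM-SM, create the special register vif.  The device pointer is
 *	published last, under mrt_lock, so readers never see a half-built mif.
 */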
 928static int mif6_add(struct net *net, struct mr6_table *mrt,
 929		    struct mif6ctl *vifc, int mrtsock)
 930{
 931	int vifi = vifc->mif6c_mifi;
 932	struct mif_device *v = &mrt->vif6_table[vifi];
 933	struct net_device *dev;
 934	struct inet6_dev *in6_dev;
 935	int err;
 936
 937	/* Is vif busy ? */
 938	if (MIF_EXISTS(mrt, vifi))
 939		return -EADDRINUSE;
 940
 941	switch (vifc->mif6c_flags) {
 942#ifdef CONFIG_IPV6_PIMSM_V2
 943	case MIFF_REGISTER:
 944		/*
 945		 * Special Purpose VIF in PIM
 946		 * All the packets will be sent to the daemon
 947		 */
 948		if (mrt->mroute_reg_vif_num >= 0)
 949			return -EADDRINUSE;
 950		dev = ip6mr_reg_vif(net, mrt);
 951		if (!dev)
 952			return -ENOBUFS;
 953		err = dev_set_allmulti(dev, 1);
 954		if (err) {
 955			unregister_netdevice(dev);
 956			dev_put(dev);
 957			return err;
 958		}
 959		break;
 960#endif
 961	case 0:
 962		dev = dev_get_by_index(net, vifc->mif6c_pifi);
 963		if (!dev)
 964			return -EADDRNOTAVAIL;
 965		err = dev_set_allmulti(dev, 1);
 966		if (err) {
 967			dev_put(dev);
 968			return err;
 969		}
 970		break;
 971	default:
 972		return -EINVAL;
 973	}
 974
 975	in6_dev = __in6_dev_get(dev);
 976	if (in6_dev) {
 977		in6_dev->cnf.mc_forwarding++;
 978		inet6_netconf_notify_devconf(dev_net(dev),
 979					     NETCONFA_MC_FORWARDING,
 980					     dev->ifindex, &in6_dev->cnf);
 981	}
 982
 983	/*
 984	 *	Fill in the VIF structures
 985	 */
 986	v->rate_limit = vifc->vifc_rate_limit;
 987	v->flags = vifc->mif6c_flags;
 988	if (!mrtsock)
 989		v->flags |= VIFF_STATIC;
 990	v->threshold = vifc->vifc_threshold;
 991	v->bytes_in = 0;
 992	v->bytes_out = 0;
 993	v->pkt_in = 0;
 994	v->pkt_out = 0;
 995	v->link = dev->ifindex;
 996	if (v->flags & MIFF_REGISTER)
 997		v->link = dev->iflink;
 998
 999	/* And finish update writing critical data */
1000	write_lock_bh(&mrt_lock);
1001	v->dev = dev;
1002#ifdef CONFIG_IPV6_PIMSM_V2
1003	if (v->flags & MIFF_REGISTER)
1004		mrt->mroute_reg_vif_num = vifi;
1005#endif
1006	if (vifi + 1 > mrt->maxvif)
1007		mrt->maxvif = vifi + 1;
1008	write_unlock_bh(&mrt_lock);
1009	return 0;
1010}
1011
1012static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1013					   const struct in6_addr *origin,
1014					   const struct in6_addr *mcastgrp)
1015{
1016	int line = MFC6_HASH(mcastgrp, origin);
1017	struct mfc6_cache *c;
1018
1019	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1020		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1021		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1022			return c;
1023	}
1024	return NULL;
1025}
1026
1027/* Look for a (*,*,oif) entry */
1028static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1029						      mifi_t mifi)
1030{
1031	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1032	struct mfc6_cache *c;
1033
1034	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1035		if (ipv6_addr_any(&c->mf6c_origin) &&
1036		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
1037		    (c->mfc_un.res.ttls[mifi] < 255))
1038			return c;
1039
1040	return NULL;
1041}
1042
1043/* Look for a (*,G) entry */
1044static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1045					       struct in6_addr *mcastgrp,
1046					       mifi_t mifi)
1047{
1048	int line = MFC6_HASH(mcastgrp, &in6addr_any);
1049	struct mfc6_cache *c, *proxy;
1050
1051	if (ipv6_addr_any(mcastgrp))
1052		goto skip;
1053
1054	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1055		if (ipv6_addr_any(&c->mf6c_origin) &&
1056		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1057			if (c->mfc_un.res.ttls[mifi] < 255)
1058				return c;
1059
1060			/* It's ok if the mifi is part of the static tree */
1061			proxy = ip6mr_cache_find_any_parent(mrt,
1062							    c->mf6c_parent);
1063			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1064				return c;
1065		}
1066
1067skip:
1068	return ip6mr_cache_find_any_parent(mrt, mifi);
1069}
1070
1071/*
1072 *	Allocate a multicast cache entry
1073 */
1074static struct mfc6_cache *ip6mr_cache_alloc(void)
1075{
1076	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1077	if (c == NULL)
1078		return NULL;
1079	c->mfc_un.res.minvif = MAXMIFS;
1080	return c;
1081}
1082
1083static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1084{
1085	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1086	if (c == NULL)
1087		return NULL;
1088	skb_queue_head_init(&c->mfc_un.unres.unresolved);
1089	c->mfc_un.unres.expires = jiffies + 10 * HZ;
1090	return c;
1091}
1092
1093/*
1094 *	A cache entry has gone into a resolved state from queued
1095 */
1096
1097static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1098				struct mfc6_cache *uc, struct mfc6_cache *c)
1099{
1100	struct sk_buff *skb;
1101
1102	/*
1103	 *	Play the pending entries through our router
1104	 */
1105
1106	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1107		if (ipv6_hdr(skb)->version == 0) {
1108			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1109
1110			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1111				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1112			} else {
1113				nlh->nlmsg_type = NLMSG_ERROR;
1114				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1115				skb_trim(skb, nlh->nlmsg_len);
1116				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1117			}
1118			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1119		} else
1120			ip6_mr_forward(net, mrt, skb, c);
1121	}
1122}
1123
1124/*
1125 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1126 *	expects the following bizarre scheme.
1127 *
1128 *	Called under mrt_lock.
1129 */
1130
1131static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1132			      mifi_t mifi, int assert)
1133{
1134	struct sk_buff *skb;
1135	struct mrt6msg *msg;
1136	int ret;
1137
1138#ifdef CONFIG_IPV6_PIMSM_V2
1139	if (assert == MRT6MSG_WHOLEPKT)
1140		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1141						+sizeof(*msg));
1142	else
1143#endif
1144		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1145
1146	if (!skb)
1147		return -ENOBUFS;
1148
1149	/* I suppose that internal messages
1150	 * do not require checksums */
1151
1152	skb->ip_summed = CHECKSUM_UNNECESSARY;
1153
1154#ifdef CONFIG_IPV6_PIMSM_V2
1155	if (assert == MRT6MSG_WHOLEPKT) {
1156		/* Ugly, but we have no choice with this interface.
1157		   Duplicate old header, fix length etc.
1158		   And all this only to mangle msg->im6_msgtype and
1159		   to set msg->im6_mbz to "mbz" :-)
1160		 */
1161		skb_push(skb, -skb_network_offset(pkt));
1162
1163		skb_push(skb, sizeof(*msg));
1164		skb_reset_transport_header(skb);
1165		msg = (struct mrt6msg *)skb_transport_header(skb);
1166		msg->im6_mbz = 0;
1167		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1168		msg->im6_mif = mrt->mroute_reg_vif_num;
1169		msg->im6_pad = 0;
1170		msg->im6_src = ipv6_hdr(pkt)->saddr;
1171		msg->im6_dst = ipv6_hdr(pkt)->daddr;
1172
1173		skb->ip_summed = CHECKSUM_UNNECESSARY;
1174	} else
1175#endif
1176	{
1177	/*
1178	 *	Copy the IP header
1179	 */
1180
1181	skb_put(skb, sizeof(struct ipv6hdr));
1182	skb_reset_network_header(skb);
1183	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1184
1185	/*
1186	 *	Add our header
1187	 */
1188	skb_put(skb, sizeof(*msg));
1189	skb_reset_transport_header(skb);
1190	msg = (struct mrt6msg *)skb_transport_header(skb);
1191
1192	msg->im6_mbz = 0;
1193	msg->im6_msgtype = assert;
1194	msg->im6_mif = mifi;
1195	msg->im6_pad = 0;
1196	msg->im6_src = ipv6_hdr(pkt)->saddr;
1197	msg->im6_dst = ipv6_hdr(pkt)->daddr;
1198
1199	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1200	skb->ip_summed = CHECKSUM_UNNECESSARY;
1201	}
1202
1203	if (mrt->mroute6_sk == NULL) {
1204		kfree_skb(skb);
1205		return -EINVAL;
1206	}
1207
1208	/*
1209	 *	Deliver to user space multicast routing algorithms
1210	 */
1211	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1212	if (ret < 0) {
1213		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1214		kfree_skb(skb);
1215	}
1216
1217	return ret;
1218}
1219
1220/*
1221 *	Queue a packet for resolution; the unresolved entry is created and filled under mfc_unres_lock.
1222 */
1223
1224static int
1225ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1226{
1227	bool found = false;
1228	int err;
1229	struct mfc6_cache *c;
1230
1231	spin_lock_bh(&mfc_unres_lock);
1232	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1233		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1234		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1235			found = true;
1236			break;
1237		}
1238	}
1239
1240	if (!found) {
1241		/*
1242		 *	Create a new entry if allowable
1243		 */
1244
1245		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1246		    (c = ip6mr_cache_alloc_unres()) == NULL) {
1247			spin_unlock_bh(&mfc_unres_lock);
1248
1249			kfree_skb(skb);
1250			return -ENOBUFS;
1251		}
1252
1253		/*
1254		 *	Fill in the new cache entry
1255		 */
1256		c->mf6c_parent = -1;
1257		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1258		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1259
1260		/*
1261		 *	Reflect first query at pim6sd
1262		 */
1263		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1264		if (err < 0) {
1265			/* If the report failed throw the cache entry
1266			   out - Brad Parker
1267			 */
1268			spin_unlock_bh(&mfc_unres_lock);
1269
1270			ip6mr_cache_free(c);
1271			kfree_skb(skb);
1272			return err;
1273		}
1274
1275		atomic_inc(&mrt->cache_resolve_queue_len);
1276		list_add(&c->list, &mrt->mfc6_unres_queue);
1277		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1278
1279		ipmr_do_expire_process(mrt);
1280	}
1281
1282	/*
1283	 *	See if we can append the packet
1284	 */
1285	if (c->mfc_un.unres.unresolved.qlen > 3) {
1286		kfree_skb(skb);
1287		err = -ENOBUFS;
1288	} else {
1289		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1290		err = 0;
1291	}
1292
1293	spin_unlock_bh(&mfc_unres_lock);
1294	return err;
1295}
1296
1297/*
1298 *	MFC6 cache manipulation by user space
1299 */
1300
1301static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1302			    int parent)
1303{
1304	int line;
1305	struct mfc6_cache *c, *next;
1306
1307	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1308
1309	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1310		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1311		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1312				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1313		    (parent == -1 || parent == c->mf6c_parent)) {
1314			write_lock_bh(&mrt_lock);
1315			list_del(&c->list);
1316			write_unlock_bh(&mrt_lock);
1317
1318			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1319			ip6mr_cache_free(c);
1320			return 0;
1321		}
1322	}
1323	return -ENOENT;
1324}
1325
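/*
 *	Netdevice notifier: on NETDEV_UNREGISTER, delete every mif bound to
 *	the vanishing device in every table, batching teardown on one list.
 */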
1326static int ip6mr_device_event(struct notifier_block *this,
1327			      unsigned long event, void *ptr)
1328{
1329	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1330	struct net *net = dev_net(dev);
1331	struct mr6_table *mrt;
1332	struct mif_device *v;
1333	int ct;
1334	LIST_HEAD(list);
1335
1336	if (event != NETDEV_UNREGISTER)
1337		return NOTIFY_DONE;
1338
1339	ip6mr_for_each_table(mrt, net) {
1340		v = &mrt->vif6_table[0];
1341		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1342			if (v->dev == dev)
1343				mif6_delete(mrt, ct, &list);
1344		}
1345	}
1346	unregister_netdevice_many(&list);
1347
1348	return NOTIFY_DONE;
1349}
1350
1351static struct notifier_block ip6_mr_notifier = {
1352	.notifier_call = ip6mr_device_event
1353};
1354
1355/*
1356 *	Setup for IP multicast routing
1357 */
1358
1359static int __net_init ip6mr_net_init(struct net *net)
1360{
1361	int err;
1362
1363	err = ip6mr_rules_init(net);
1364	if (err < 0)
1365		goto fail;
1366
1367#ifdef CONFIG_PROC_FS
1368	err = -ENOMEM;
1369	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1370		goto proc_vif_fail;
1371	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1372		goto proc_cache_fail;
1373#endif
1374
1375	return 0;
1376
1377#ifdef CONFIG_PROC_FS
1378proc_cache_fail:
1379	remove_proc_entry("ip6_mr_vif", net->proc_net);
1380proc_vif_fail:
1381	ip6mr_rules_exit(net);
1382#endif
1383fail:
1384	return err;
1385}
1386
1387static void __net_exit ip6mr_net_exit(struct net *net)
1388{
1389#ifdef CONFIG_PROC_FS
1390	remove_proc_entry("ip6_mr_cache", net->proc_net);
1391	remove_proc_entry("ip6_mr_vif", net->proc_net);
1392#endif
1393	ip6mr_rules_exit(net);
1394}
1395
1396static struct pernet_operations ip6mr_net_ops = {
1397	.init = ip6mr_net_init,
1398	.exit = ip6mr_net_exit,
1399};
1400
1401int __init ip6_mr_init(void)
1402{
1403	int err;
1404
1405	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1406				       sizeof(struct mfc6_cache),
1407				       0, SLAB_HWCACHE_ALIGN,
1408				       NULL);
1409	if (!mrt_cachep)
1410		return -ENOMEM;
1411
1412	err = register_pernet_subsys(&ip6mr_net_ops);
1413	if (err)
1414		goto reg_pernet_fail;
1415
1416	err = register_netdevice_notifier(&ip6_mr_notifier);
1417	if (err)
1418		goto reg_notif_fail;
1419#ifdef CONFIG_IPV6_PIMSM_V2
1420	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1421		pr_err("%s: can't add PIM protocol\n", __func__);
1422		err = -EAGAIN;
1423		goto add_proto_fail;
1424	}
1425#endif
1426	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1427		      ip6mr_rtm_dumproute, NULL);
1428	return 0;
1429#ifdef CONFIG_IPV6_PIMSM_V2
1430add_proto_fail:
1431	unregister_netdevice_notifier(&ip6_mr_notifier);
1432#endif
1433reg_notif_fail:
1434	unregister_pernet_subsys(&ip6mr_net_ops);
1435reg_pernet_fail:
1436	kmem_cache_destroy(mrt_cachep);
1437	return err;
1438}
1439
1440void ip6_mr_cleanup(void)
1441{
1442	unregister_netdevice_notifier(&ip6_mr_notifier);
1443	unregister_pernet_subsys(&ip6mr_net_ops);
1444	kmem_cache_destroy(mrt_cachep);
1445}
1446
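/*
 *	Install or update an MFC entry on behalf of user space.  If a
 *	matching entry sat on the unresolved queue, its pending packets are
 *	replayed through the new route via ip6mr_cache_resolve().
 */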
1447static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1448			 struct mf6cctl *mfc, int mrtsock, int parent)
1449{
1450	bool found = false;
1451	int line;
1452	struct mfc6_cache *uc, *c;
1453	unsigned char ttls[MAXMIFS];
1454	int i;
1455
1456	if (mfc->mf6cc_parent >= MAXMIFS)
1457		return -ENFILE;
1458
1459	memset(ttls, 255, MAXMIFS);
1460	for (i = 0; i < MAXMIFS; i++) {
1461		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1462			ttls[i] = 1;
1463
1464	}
1465
1466	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1467
1468	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1469		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1470		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1471				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1472		    (parent == -1 || parent == mfc->mf6cc_parent)) {
1473			found = true;
1474			break;
1475		}
1476	}
1477
1478	if (found) {
1479		write_lock_bh(&mrt_lock);
1480		c->mf6c_parent = mfc->mf6cc_parent;
1481		ip6mr_update_thresholds(mrt, c, ttls);
1482		if (!mrtsock)
1483			c->mfc_flags |= MFC_STATIC;
1484		write_unlock_bh(&mrt_lock);
1485		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1486		return 0;
1487	}
1488
1489	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1490	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1491		return -EINVAL;
1492
1493	c = ip6mr_cache_alloc();
1494	if (c == NULL)
1495		return -ENOMEM;
1496
1497	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1498	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1499	c->mf6c_parent = mfc->mf6cc_parent;
1500	ip6mr_update_thresholds(mrt, c, ttls);
1501	if (!mrtsock)
1502		c->mfc_flags |= MFC_STATIC;
1503
1504	write_lock_bh(&mrt_lock);
1505	list_add(&c->list, &mrt->mfc6_cache_array[line]);
1506	write_unlock_bh(&mrt_lock);
1507
1508	/*
1509	 *	Check to see if we resolved a queued list. If so we
1510	 *	need to send on the frames and tidy up.
1511	 */
1512	found = false;
1513	spin_lock_bh(&mfc_unres_lock);
1514	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1515		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1516		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1517			list_del(&uc->list);
1518			atomic_dec(&mrt->cache_resolve_queue_len);
1519			found = true;
1520			break;
1521		}
1522	}
1523	if (list_empty(&mrt->mfc6_unres_queue))
1524		del_timer(&mrt->ipmr_expire_timer);
1525	spin_unlock_bh(&mfc_unres_lock);
1526
1527	if (found) {
1528		ip6mr_cache_resolve(net, mrt, uc, c);
1529		ip6mr_cache_free(uc);
1530	}
1531	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1532	return 0;
1533}
1534
1535/*
1536 *	Close the multicast socket, and clear the vif tables etc
1537 */
1538
1539static void mroute_clean_tables(struct mr6_table *mrt)
1540{
1541	int i;
1542	LIST_HEAD(list);
1543	struct mfc6_cache *c, *next;
1544
1545	/*
1546	 *	Shut down all active vif entries
1547	 */
1548	for (i = 0; i < mrt->maxvif; i++) {
1549		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1550			mif6_delete(mrt, i, &list);
1551	}
1552	unregister_netdevice_many(&list);
1553
1554	/*
1555	 *	Wipe the cache
1556	 */
1557	for (i = 0; i < MFC6_LINES; i++) {
1558		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1559			if (c->mfc_flags & MFC_STATIC)
1560				continue;
1561			write_lock_bh(&mrt_lock);
1562			list_del(&c->list);
1563			write_unlock_bh(&mrt_lock);
1564
1565			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1566			ip6mr_cache_free(c);
1567		}
1568	}
1569
1570	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1571		spin_lock_bh(&mfc_unres_lock);
1572		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1573			list_del(&c->list);
1574			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1575			ip6mr_destroy_unres(mrt, c);
1576		}
1577		spin_unlock_bh(&mfc_unres_lock);
1578	}
1579}
1580
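/*
 *	Bind the pim6sd control socket to this table.  Only one socket may
 *	own a table at a time; binding bumps the all-devices mc_forwarding
 *	counter and notifies netconf listeners.
 */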
1581static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1582{
1583	int err = 0;
1584	struct net *net = sock_net(sk);
1585
1586	rtnl_lock();
1587	write_lock_bh(&mrt_lock);
1588	if (likely(mrt->mroute6_sk == NULL)) {
1589		mrt->mroute6_sk = sk;
1590		net->ipv6.devconf_all->mc_forwarding++;
1591		inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1592					     NETCONFA_IFINDEX_ALL,
1593					     net->ipv6.devconf_all);
1594	}
1595	else
1596		err = -EADDRINUSE;
1597	write_unlock_bh(&mrt_lock);
1598
1599	rtnl_unlock();
1600
1601	return err;
1602}
1603
1604int ip6mr_sk_done(struct sock *sk)
1605{
1606	int err = -EACCES;
1607	struct net *net = sock_net(sk);
1608	struct mr6_table *mrt;
1609
1610	rtnl_lock();
1611	ip6mr_for_each_table(mrt, net) {
1612		if (sk == mrt->mroute6_sk) {
1613			write_lock_bh(&mrt_lock);
1614			mrt->mroute6_sk = NULL;
1615			net->ipv6.devconf_all->mc_forwarding--;
1616			inet6_netconf_notify_devconf(net,
1617						     NETCONFA_MC_FORWARDING,
1618						     NETCONFA_IFINDEX_ALL,
1619						     net->ipv6.devconf_all);
1620			write_unlock_bh(&mrt_lock);
1621
1622			mroute_clean_tables(mrt);
1623			err = 0;
1624			break;
1625		}
1626	}
1627	rtnl_unlock();
1628
1629	return err;
1630}
1631
1632struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1633{
1634	struct mr6_table *mrt;
1635	struct flowi6 fl6 = {
1636		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
1637		.flowi6_oif	= skb->dev->ifindex,
1638		.flowi6_mark	= skb->mark,
1639	};
1640
1641	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1642		return NULL;
1643
1644	return mrt->mroute6_sk;
1645}
1646
1647/*
1648 *	Socket options and virtual interface manipulation. The whole
1649 *	virtual interface system is a complete heap, but unfortunately
1650 *	that's how BSD mrouted happens to think. Maybe one day with a proper
1651 *	MOSPF/PIM router set up we can clean this up.
1652 */
1653
1654int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1655{
1656	int ret, parent = 0;
1657	struct mif6ctl vif;
1658	struct mf6cctl mfc;
1659	mifi_t mifi;
1660	struct net *net = sock_net(sk);
1661	struct mr6_table *mrt;
1662
1663	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1664	if (mrt == NULL)
1665		return -ENOENT;
1666
1667	if (optname != MRT6_INIT) {
1668		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1669			return -EACCES;
1670	}
1671
1672	switch (optname) {
1673	case MRT6_INIT:
1674		if (sk->sk_type != SOCK_RAW ||
1675		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1676			return -EOPNOTSUPP;
1677		if (optlen < sizeof(int))
1678			return -EINVAL;
1679
1680		return ip6mr_sk_init(mrt, sk);
1681
1682	case MRT6_DONE:
1683		return ip6mr_sk_done(sk);
1684
1685	case MRT6_ADD_MIF:
1686		if (optlen < sizeof(vif))
1687			return -EINVAL;
1688		if (copy_from_user(&vif, optval, sizeof(vif)))
1689			return -EFAULT;
1690		if (vif.mif6c_mifi >= MAXMIFS)
1691			return -ENFILE;
1692		rtnl_lock();
1693		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1694		rtnl_unlock();
1695		return ret;
1696
1697	case MRT6_DEL_MIF:
1698		if (optlen < sizeof(mifi_t))
1699			return -EINVAL;
1700		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1701			return -EFAULT;
1702		rtnl_lock();
1703		ret = mif6_delete(mrt, mifi, NULL);
1704		rtnl_unlock();
1705		return ret;
1706
1707	/*
1708	 *	Manipulate the forwarding caches. These live
1709	 *	in a sort of kernel/user symbiosis.
1710	 */
1711	case MRT6_ADD_MFC:
1712	case MRT6_DEL_MFC:
1713		parent = -1;	/* fall through */
1714	case MRT6_ADD_MFC_PROXY:
1715	case MRT6_DEL_MFC_PROXY:
1716		if (optlen < sizeof(mfc))
1717			return -EINVAL;
1718		if (copy_from_user(&mfc, optval, sizeof(mfc)))
1719			return -EFAULT;
1720		if (parent == 0)
1721			parent = mfc.mf6cc_parent;
1722		rtnl_lock();
1723		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1724			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1725		else
1726			ret = ip6mr_mfc_add(net, mrt, &mfc,
1727					    sk == mrt->mroute6_sk, parent);
1728		rtnl_unlock();
1729		return ret;
1730
1731	/*
1732	 *	Control PIM assert (to activate pim will activate assert)
1733	 */
1734	case MRT6_ASSERT:
1735	{
1736		int v;
1737
1738		if (optlen != sizeof(v))
1739			return -EINVAL;
1740		if (get_user(v, (int __user *)optval))
1741			return -EFAULT;
1742		mrt->mroute_do_assert = v;
1743		return 0;
1744	}
1745
1746#ifdef CONFIG_IPV6_PIMSM_V2
1747	case MRT6_PIM:
1748	{
1749		int v;
1750
1751		if (optlen != sizeof(v))
1752			return -EINVAL;
1753		if (get_user(v, (int __user *)optval))
1754			return -EFAULT;
1755		v = !!v;
1756		rtnl_lock();
1757		ret = 0;
1758		if (v != mrt->mroute_do_pim) {
1759			mrt->mroute_do_pim = v;
1760			mrt->mroute_do_assert = v;
1761		}
1762		rtnl_unlock();
1763		return ret;
1764	}
1765
1766#endif
1767#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1768	case MRT6_TABLE:
1769	{
1770		u32 v;
1771
1772		if (optlen != sizeof(u32))
1773			return -EINVAL;
1774		if (get_user(v, (u32 __user *)optval))
1775			return -EFAULT;
1776		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1777		if (v != RT_TABLE_DEFAULT && v >= 100000000)
1778			return -EINVAL;
1779		if (sk == mrt->mroute6_sk)
1780			return -EBUSY;
1781
1782		rtnl_lock();
1783		ret = 0;
1784		if (!ip6mr_new_table(net, v))
1785			ret = -ENOMEM;
1786		raw6_sk(sk)->ip6mr_table = v;
1787		rtnl_unlock();
1788		return ret;
1789	}
1790#endif
1791	/*
1792	 *	Spurious command, or MRT6_VERSION which you cannot
1793	 *	set.
1794	 */
1795	default:
1796		return -ENOPROTOOPT;
1797	}
1798}
1799
1800/*
1801 *	Getsock opt support for the multicast routing system.
1802 */
1803
1804int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1805			  int __user *optlen)
1806{
1807	int olr;
1808	int val;
1809	struct net *net = sock_net(sk);
1810	struct mr6_table *mrt;
1811
1812	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1813	if (mrt == NULL)
1814		return -ENOENT;
1815
1816	switch (optname) {
1817	case MRT6_VERSION:
1818		val = 0x0305;
1819		break;
1820#ifdef CONFIG_IPV6_PIMSM_V2
1821	case MRT6_PIM:
1822		val = mrt->mroute_do_pim;
1823		break;
1824#endif
1825	case MRT6_ASSERT:
1826		val = mrt->mroute_do_assert;
1827		break;
1828	default:
1829		return -ENOPROTOOPT;
1830	}
1831
1832	if (get_user(olr, optlen))
1833		return -EFAULT;
1834
1835	olr = min_t(int, olr, sizeof(int));
1836	if (olr < 0)
1837		return -EINVAL;
1838
1839	if (put_user(olr, optlen))
1840		return -EFAULT;
1841	if (copy_to_user(optval, &val, olr))
1842		return -EFAULT;
1843	return 0;
1844}
1845
1846/*
1847 *	The IP multicast ioctl support routines.
1848 */
1849
1850int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1851{
1852	struct sioc_sg_req6 sr;
1853	struct sioc_mif_req6 vr;
1854	struct mif_device *vif;
1855	struct mfc6_cache *c;
1856	struct net *net = sock_net(sk);
1857	struct mr6_table *mrt;
1858
1859	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1860	if (mrt == NULL)
1861		return -ENOENT;
1862
1863	switch (cmd) {
1864	case SIOCGETMIFCNT_IN6:
1865		if (copy_from_user(&vr, arg, sizeof(vr)))
1866			return -EFAULT;
1867		if (vr.mifi >= mrt->maxvif)
1868			return -EINVAL;
1869		read_lock(&mrt_lock);
1870		vif = &mrt->vif6_table[vr.mifi];
1871		if (MIF_EXISTS(mrt, vr.mifi)) {
1872			vr.icount = vif->pkt_in;
1873			vr.ocount = vif->pkt_out;
1874			vr.ibytes = vif->bytes_in;
1875			vr.obytes = vif->bytes_out;
1876			read_unlock(&mrt_lock);
1877
1878			if (copy_to_user(arg, &vr, sizeof(vr)))
1879				return -EFAULT;
1880			return 0;
1881		}
1882		read_unlock(&mrt_lock);
1883		return -EADDRNOTAVAIL;
1884	case SIOCGETSGCNT_IN6:
1885		if (copy_from_user(&sr, arg, sizeof(sr)))
1886			return -EFAULT;
1887
1888		read_lock(&mrt_lock);
1889		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1890		if (c) {
1891			sr.pktcnt = c->mfc_un.res.pkt;
1892			sr.bytecnt = c->mfc_un.res.bytes;
1893			sr.wrong_if = c->mfc_un.res.wrong_if;
1894			read_unlock(&mrt_lock);
1895
1896			if (copy_to_user(arg, &sr, sizeof(sr)))
1897				return -EFAULT;
1898			return 0;
1899		}
1900		read_unlock(&mrt_lock);
1901		return -EADDRNOTAVAIL;
1902	default:
1903		return -ENOIOCTLCMD;
1904	}
1905}
1906
1907#ifdef CONFIG_COMPAT
1908struct compat_sioc_sg_req6 {
1909	struct sockaddr_in6 src;
1910	struct sockaddr_in6 grp;
1911	compat_ulong_t pktcnt;
1912	compat_ulong_t bytecnt;
1913	compat_ulong_t wrong_if;
1914};
1915
1916struct compat_sioc_mif_req6 {
1917	mifi_t	mifi;
1918	compat_ulong_t icount;
1919	compat_ulong_t ocount;
1920	compat_ulong_t ibytes;
1921	compat_ulong_t obytes;
1922};
1923
1924int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1925{
1926	struct compat_sioc_sg_req6 sr;
1927	struct compat_sioc_mif_req6 vr;
1928	struct mif_device *vif;
1929	struct mfc6_cache *c;
1930	struct net *net = sock_net(sk);
1931	struct mr6_table *mrt;
1932
1933	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1934	if (mrt == NULL)
1935		return -ENOENT;
1936
1937	switch (cmd) {
1938	case SIOCGETMIFCNT_IN6:
1939		if (copy_from_user(&vr, arg, sizeof(vr)))
1940			return -EFAULT;
1941		if (vr.mifi >= mrt->maxvif)
1942			return -EINVAL;
1943		read_lock(&mrt_lock);
1944		vif = &mrt->vif6_table[vr.mifi];
1945		if (MIF_EXISTS(mrt, vr.mifi)) {
1946			vr.icount = vif->pkt_in;
1947			vr.ocount = vif->pkt_out;
1948			vr.ibytes = vif->bytes_in;
1949			vr.obytes = vif->bytes_out;
1950			read_unlock(&mrt_lock);
1951
1952			if (copy_to_user(arg, &vr, sizeof(vr)))
1953				return -EFAULT;
1954			return 0;
1955		}
1956		read_unlock(&mrt_lock);
1957		return -EADDRNOTAVAIL;
1958	case SIOCGETSGCNT_IN6:
1959		if (copy_from_user(&sr, arg, sizeof(sr)))
1960			return -EFAULT;
1961
1962		read_lock(&mrt_lock);
1963		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1964		if (c) {
1965			sr.pktcnt = c->mfc_un.res.pkt;
1966			sr.bytecnt = c->mfc_un.res.bytes;
1967			sr.wrong_if = c->mfc_un.res.wrong_if;
1968			read_unlock(&mrt_lock);
1969
1970			if (copy_to_user(arg, &sr, sizeof(sr)))
1971				return -EFAULT;
1972			return 0;
1973		}
1974		read_unlock(&mrt_lock);
1975		return -EADDRNOTAVAIL;
1976	default:
1977		return -ENOIOCTLCMD;
1978	}
1979}
1980#endif
1981
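/*
 *	ip6mr_forward2_finish() is the okfn passed to NF_HOOK() below: it
 *	runs once the NF_INET_FORWARD hook accepts the packet, updates the
 *	forwarding MIB counters and hands the skb to dst_output().
 */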
1982static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1983{
1984	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1985			 IPSTATS_MIB_OUTFORWDATAGRAMS);
1986	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1987			 IPSTATS_MIB_OUTOCTETS, skb->len);
1988	return dst_output(skb);
1989}
1990
1991/*
1992 *	Processing handlers for ip6mr_forward
1993 */
1994
1995static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1996			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1997{
1998	struct ipv6hdr *ipv6h;
1999	struct mif_device *vif = &mrt->vif6_table[vifi];
2000	struct net_device *dev;
2001	struct dst_entry *dst;
2002	struct flowi6 fl6;
2003
2004	if (vif->dev == NULL)
2005		goto out_free;
2006
2007#ifdef CONFIG_IPV6_PIMSM_V2
2008	if (vif->flags & MIFF_REGISTER) {
2009		vif->pkt_out++;
2010		vif->bytes_out += skb->len;
2011		vif->dev->stats.tx_bytes += skb->len;
2012		vif->dev->stats.tx_packets++;
2013		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2014		goto out_free;
2015	}
2016#endif
2017
2018	ipv6h = ipv6_hdr(skb);
2019
2020	fl6 = (struct flowi6) {
2021		.flowi6_oif = vif->link,
2022		.daddr = ipv6h->daddr,
2023	};
2024
2025	dst = ip6_route_output(net, NULL, &fl6);
2026	if (dst->error) {
2027		dst_release(dst);
2028		goto out_free;
2029	}
2030
2031	skb_dst_drop(skb);
2032	skb_dst_set(skb, dst);
2033
2034	/*
2035	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
2036	 * locally not only before forwarding, but also after forwarding on
2037	 * all output interfaces. Clearly, if the mrouter runs a multicast
2038	 * program, that program should receive packets regardless of which
2039	 * interface it joined on.
2040	 * If we did not do this, the program would have to join on all
2041	 * interfaces. On the other hand, a multihomed host (or router, but
2042	 * not mrouter) cannot join on more than one interface - that would
2043	 * result in receiving multiple copies of each packet.
2044	 */
2045	dev = vif->dev;
2046	skb->dev = dev;
2047	vif->pkt_out++;
2048	vif->bytes_out += skb->len;
2049
2050	/* We are about to write */
2051	/* XXX: extension headers? */
2052	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2053		goto out_free;
2054
2055	ipv6h = ipv6_hdr(skb);
2056	ipv6h->hop_limit--;
2057
2058	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2059
2060	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
2061		       ip6mr_forward2_finish);
2062
2063out_free:
2064	kfree_skb(skb);
2065	return 0;
2066}
2067
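/*
 *	Map a net_device back to its MIF index by scanning vif6_table from
 *	the top; the loop falls through with ct == -1 when the device is
 *	not a configured MIF.
 */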
2068static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2069{
2070	int ct;
2071
2072	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2073		if (mrt->vif6_table[ct].dev == dev)
2074			break;
2075	}
2076	return ct;
2077}
2078
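/*
 *	Resolve the oif set for a cache entry and transmit: every matching
 *	oif except the last gets an skb_clone(), while the original skb is
 *	consumed by the final ip6mr_forward2() call or freed on the
 *	dont_forward path.
 */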
2079static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2080			   struct sk_buff *skb, struct mfc6_cache *cache)
2081{
2082	int psend = -1;
2083	int vif, ct;
2084	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2085
2086	vif = cache->mf6c_parent;
2087	cache->mfc_un.res.pkt++;
2088	cache->mfc_un.res.bytes += skb->len;
2089
2090	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2091		struct mfc6_cache *cache_proxy;
2092
2093		/* For an (*,G) entry, we only check that the incoming
2094		 * interface is part of the static tree.
2095		 */
2096		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2097		if (cache_proxy &&
2098		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2099			goto forward;
2100	}
2101
2102	/*
2103	 * Wrong interface: drop packet and (maybe) send PIM assert.
2104	 */
2105	if (mrt->vif6_table[vif].dev != skb->dev) {
2106		cache->mfc_un.res.wrong_if++;
2107
2108		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2109		    /* pimsm uses asserts when switching from RPT to SPT,
2110		       so we cannot check that the packet arrived on an oif.
2111		       That is bad, but otherwise we would have to move a
2112		       pretty large chunk of pimd into the kernel. Ough... --ANK
2113		     */
2114		    (mrt->mroute_do_pim ||
2115		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2116		    time_after(jiffies,
2117			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2118			cache->mfc_un.res.last_assert = jiffies;
2119			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2120		}
2121		goto dont_forward;
2122	}
2123
2124forward:
2125	mrt->vif6_table[vif].pkt_in++;
2126	mrt->vif6_table[vif].bytes_in += skb->len;
2127
2128	/*
2129	 *	Forward the frame
2130	 */
2131	if (ipv6_addr_any(&cache->mf6c_origin) &&
2132	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2133		if (true_vifi >= 0 &&
2134		    true_vifi != cache->mf6c_parent &&
2135		    ipv6_hdr(skb)->hop_limit >
2136				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2137			/* It's an (*,*) entry and the packet is not coming from
2138			 * the upstream: forward the packet to the upstream
2139			 * only.
2140			 */
2141			psend = cache->mf6c_parent;
2142			goto last_forward;
2143		}
2144		goto dont_forward;
2145	}
2146	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2147		/* For (*,G) entry, don't forward to the incoming interface */
2148		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2149		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2150			if (psend != -1) {
2151				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2152				if (skb2)
2153					ip6mr_forward2(net, mrt, skb2, cache, psend);
2154			}
2155			psend = ct;
2156		}
2157	}
2158last_forward:
2159	if (psend != -1) {
2160		ip6mr_forward2(net, mrt, skb, cache, psend);
2161		return;
2162	}
2163
2164dont_forward:
2165	kfree_skb(skb);
2166}
2167
2168
2169/*
2170 *	Multicast packets for forwarding arrive here
2171 */
2172
2173int ip6_mr_input(struct sk_buff *skb)
2174{
2175	struct mfc6_cache *cache;
2176	struct net *net = dev_net(skb->dev);
2177	struct mr6_table *mrt;
2178	struct flowi6 fl6 = {
2179		.flowi6_iif	= skb->dev->ifindex,
2180		.flowi6_mark	= skb->mark,
2181	};
2182	int err;
2183
2184	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2185	if (err < 0) {
2186		kfree_skb(skb);
2187		return err;
2188	}
2189
2190	read_lock(&mrt_lock);
2191	cache = ip6mr_cache_find(mrt,
2192				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2193	if (cache == NULL) {
2194		int vif = ip6mr_find_vif(mrt, skb->dev);
2195
2196		if (vif >= 0)
2197			cache = ip6mr_cache_find_any(mrt,
2198						     &ipv6_hdr(skb)->daddr,
2199						     vif);
2200	}
2201
2202	/*
2203	 *	No usable cache entry
2204	 */
2205	if (cache == NULL) {
2206		int vif;
2207
2208		vif = ip6mr_find_vif(mrt, skb->dev);
2209		if (vif >= 0) {
2210			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2211			read_unlock(&mrt_lock);
2212
2213			return err;
2214		}
2215		read_unlock(&mrt_lock);
2216		kfree_skb(skb);
2217		return -ENODEV;
2218	}
2219
2220	ip6_mr_forward(net, mrt, skb, cache);
2221
2222	read_unlock(&mrt_lock);
2223
2224	return 0;
2225}
2226
2227
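/*
 *	Attribute layout produced below (sketch):
 *
 *		RTA_IIF		u32 ifindex of the parent (input) MIF
 *		RTA_MULTIPATH	array of struct rtnexthop, one per oif
 *				whose ttl is < 255
 *		RTA_MFC_STATS	struct rta_mfc_stats with the pkt, byte
 *				and wrong_if counters
 */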
2228static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2229			       struct mfc6_cache *c, struct rtmsg *rtm)
2230{
2231	int ct;
2232	struct rtnexthop *nhp;
2233	struct nlattr *mp_attr;
2234	struct rta_mfc_stats mfcs;
2235
2236	/* If cache is unresolved, don't try to parse IIF and OIF */
2237	if (c->mf6c_parent >= MAXMIFS)
2238		return -ENOENT;
2239
2240	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2241	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2242		return -EMSGSIZE;
2243	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2244	if (mp_attr == NULL)
2245		return -EMSGSIZE;
2246
2247	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2248		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2249			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2250			if (nhp == NULL) {
2251				nla_nest_cancel(skb, mp_attr);
2252				return -EMSGSIZE;
2253			}
2254
2255			nhp->rtnh_flags = 0;
2256			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2257			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2258			nhp->rtnh_len = sizeof(*nhp);
2259		}
2260	}
2261
2262	nla_nest_end(skb, mp_attr);
2263
2264	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2265	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2266	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2267	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2268		return -EMSGSIZE;
2269
2270	rtm->rtm_type = RTN_MULTICAST;
2271	return 1;
2272}
2273
2274int ip6mr_get_route(struct net *net,
2275		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
2276{
2277	int err;
2278	struct mr6_table *mrt;
2279	struct mfc6_cache *cache;
2280	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2281
2282	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2283	if (mrt == NULL)
2284		return -ENOENT;
2285
2286	read_lock(&mrt_lock);
2287	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2288	if (!cache && skb->dev) {
2289		int vif = ip6mr_find_vif(mrt, skb->dev);
2290
2291		if (vif >= 0)
2292			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2293						     vif);
2294	}
2295
2296	if (!cache) {
2297		struct sk_buff *skb2;
2298		struct ipv6hdr *iph;
2299		struct net_device *dev;
2300		int vif;
2301
2302		if (nowait) {
2303			read_unlock(&mrt_lock);
2304			return -EAGAIN;
2305		}
2306
2307		dev = skb->dev;
2308		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2309			read_unlock(&mrt_lock);
2310			return -ENODEV;
2311		}
2312
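		/* Build a minimal skb carrying only an IPv6 header with the
		 * (src, grp) pair, so that ip6mr_cache_unresolved() can queue
		 * it and report the cache miss to the user-space daemon.
		 */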
2313		/* really correct? */
2314		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2315		if (!skb2) {
2316			read_unlock(&mrt_lock);
2317			return -ENOMEM;
2318		}
2319
2320		skb_reset_transport_header(skb2);
2321
2322		skb_put(skb2, sizeof(struct ipv6hdr));
2323		skb_reset_network_header(skb2);
2324
2325		iph = ipv6_hdr(skb2);
2326		iph->version = 0;
2327		iph->priority = 0;
2328		iph->flow_lbl[0] = 0;
2329		iph->flow_lbl[1] = 0;
2330		iph->flow_lbl[2] = 0;
2331		iph->payload_len = 0;
2332		iph->nexthdr = IPPROTO_NONE;
2333		iph->hop_limit = 0;
2334		iph->saddr = rt->rt6i_src.addr;
2335		iph->daddr = rt->rt6i_dst.addr;
2336
2337		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2338		read_unlock(&mrt_lock);
2339
2340		return err;
2341	}
2342
2343	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2344		cache->mfc_flags |= MFC_NOTIFY;
2345
2346	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2347	read_unlock(&mrt_lock);
2348	return err;
2349}
2350
2351static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2352			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2353			     int flags)
2354{
2355	struct nlmsghdr *nlh;
2356	struct rtmsg *rtm;
2357	int err;
2358
2359	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2360	if (nlh == NULL)
2361		return -EMSGSIZE;
2362
2363	rtm = nlmsg_data(nlh);
2364	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2365	rtm->rtm_dst_len  = 128;
2366	rtm->rtm_src_len  = 128;
2367	rtm->rtm_tos      = 0;
2368	rtm->rtm_table    = mrt->id;
2369	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2370		goto nla_put_failure;
2371	rtm->rtm_type = RTN_MULTICAST;
2372	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2373	if (c->mfc_flags & MFC_STATIC)
2374		rtm->rtm_protocol = RTPROT_STATIC;
2375	else
2376		rtm->rtm_protocol = RTPROT_MROUTED;
2377	rtm->rtm_flags    = 0;
2378
2379	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
2380	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
2381		goto nla_put_failure;
2382	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2383	/* do not break the dump if cache is unresolved */
2384	if (err < 0 && err != -ENOENT)
2385		goto nla_put_failure;
2386
2387	return nlmsg_end(skb, nlh);
2388
2389nla_put_failure:
2390	nlmsg_cancel(skb, nlh);
2391	return -EMSGSIZE;
2392}
2393
2394static int mr6_msgsize(bool unresolved, int maxvif)
2395{
2396	size_t len =
2397		NLMSG_ALIGN(sizeof(struct rtmsg))
2398		+ nla_total_size(4)	/* RTA_TABLE */
2399		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2400		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2401		;
2402
2403	if (!unresolved)
2404		len = len
2405		      + nla_total_size(4)	/* RTA_IIF */
2406		      + nla_total_size(0)	/* RTA_MULTIPATH */
2407		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2408						/* RTA_MFC_STATS */
2409		      + nla_total_size(sizeof(struct rta_mfc_stats))
2410		;
2411
2412	return len;
2413}
2414
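/*
 *	Broadcast an MFC add/delete (cmd is RTM_NEWROUTE or RTM_DELROUTE)
 *	to RTNLGRP_IPV6_MROUTE listeners; if the skb cannot be allocated
 *	or filled, the group is marked with rtnl_set_sk_err() so that
 *	listeners know to resync.
 */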
2415static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2416			      int cmd)
2417{
2418	struct net *net = read_pnet(&mrt->net);
2419	struct sk_buff *skb;
2420	int err = -ENOBUFS;
2421
2422	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2423			GFP_ATOMIC);
2424	if (skb == NULL)
2425		goto errout;
2426
2427	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2428	if (err < 0)
2429		goto errout;
2430
2431	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2432	return;
2433
2434errout:
2435	kfree_skb(skb);
2436	if (err < 0)
2437		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2438}
2439
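/*
 *	Netlink dump with resume support: cb->args[] records the (table,
 *	hash bucket, entry) position where the previous skb filled up, so
 *	the next recvmsg() restarts the walk at s_t/s_h/s_e instead of
 *	from the beginning.
 */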
2440static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2441{
2442	struct net *net = sock_net(skb->sk);
2443	struct mr6_table *mrt;
2444	struct mfc6_cache *mfc;
2445	unsigned int t = 0, s_t;
2446	unsigned int h = 0, s_h;
2447	unsigned int e = 0, s_e;
2448
2449	s_t = cb->args[0];
2450	s_h = cb->args[1];
2451	s_e = cb->args[2];
2452
2453	read_lock(&mrt_lock);
2454	ip6mr_for_each_table(mrt, net) {
2455		if (t < s_t)
2456			goto next_table;
2457		if (t > s_t)
2458			s_h = 0;
2459		for (h = s_h; h < MFC6_LINES; h++) {
2460			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2461				if (e < s_e)
2462					goto next_entry;
2463				if (ip6mr_fill_mroute(mrt, skb,
2464						      NETLINK_CB(cb->skb).portid,
2465						      cb->nlh->nlmsg_seq,
2466						      mfc, RTM_NEWROUTE,
2467						      NLM_F_MULTI) < 0)
2468					goto done;
2469next_entry:
2470				e++;
2471			}
2472			e = s_e = 0;
2473		}
2474		spin_lock_bh(&mfc_unres_lock);
2475		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2476			if (e < s_e)
2477				goto next_entry2;
2478			if (ip6mr_fill_mroute(mrt, skb,
2479					      NETLINK_CB(cb->skb).portid,
2480					      cb->nlh->nlmsg_seq,
2481					      mfc, RTM_NEWROUTE,
2482					      NLM_F_MULTI) < 0) {
2483				spin_unlock_bh(&mfc_unres_lock);
2484				goto done;
2485			}
2486next_entry2:
2487			e++;
2488		}
2489		spin_unlock_bh(&mfc_unres_lock);
2490		e = s_e = 0;
2491		s_h = 0;
2492next_table:
2493		t++;
2494	}
2495done:
2496	read_unlock(&mrt_lock);
2497
2498	cb->args[2] = e;
2499	cb->args[1] = h;
2500	cb->args[0] = t;
2501
2502	return skb->len;
2503}