1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/uaccess.h>
20#include <linux/types.h>
21#include <linux/sched.h>
22#include <linux/errno.h>
23#include <linux/timer.h>
24#include <linux/mm.h>
25#include <linux/kernel.h>
26#include <linux/fcntl.h>
27#include <linux/stat.h>
28#include <linux/socket.h>
29#include <linux/inet.h>
30#include <linux/netdevice.h>
31#include <linux/inetdevice.h>
32#include <linux/proc_fs.h>
33#include <linux/seq_file.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/compat.h>
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
40#include <net/raw.h>
41#include <linux/notifier.h>
42#include <linux/if_arp.h>
43#include <net/checksum.h>
44#include <net/netlink.h>
45#include <net/fib_rules.h>
46
47#include <net/ipv6.h>
48#include <net/ip6_route.h>
49#include <linux/mroute6.h>
50#include <linux/pim.h>
51#include <net/addrconf.h>
52#include <linux/netfilter_ipv6.h>
53#include <linux/export.h>
54#include <net/ip6_checksum.h>
55#include <linux/netconf.h>
56
57struct mr6_table {
58 struct list_head list;
59#ifdef CONFIG_NET_NS
60 struct net *net;
61#endif
62 u32 id;
63 struct sock *mroute6_sk;
64 struct timer_list ipmr_expire_timer;
65 struct list_head mfc6_unres_queue;
66 struct list_head mfc6_cache_array[MFC6_LINES];
67 struct mif_device vif6_table[MAXMIFS];
68 int maxvif;
69 atomic_t cache_resolve_queue_len;
70 bool mroute_do_assert;
71 bool mroute_do_pim;
72#ifdef CONFIG_IPV6_PIMSM_V2
73 int mroute_reg_vif_num;
74#endif
75};
76
77struct ip6mr_rule {
78 struct fib_rule common;
79};
80
81struct ip6mr_result {
82 struct mr6_table *mrt;
83};
84
85/* Big lock, protecting the vif table, mrt cache and mroute socket state.
86 Note that changes are serialized via rtnl_lock.
87 */
88
89static DEFINE_RWLOCK(mrt_lock);
90
91/*
92 * Multicast router control variables
93 */
94
95#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
96
97/* Special spinlock for queue of unresolved entries */
98static DEFINE_SPINLOCK(mfc_unres_lock);
99
100/* We return to Alan's original scheme. The hash table of resolved
101 entries is changed only in process context and protected
102 by the weak lock mrt_lock. The queue of unresolved entries is
103 protected by the strong spinlock mfc_unres_lock.
104
105 This leaves the data path entirely free of exclusive locks.
106 */
107
108static struct kmem_cache *mrt_cachep __read_mostly;
109
110static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
111static void ip6mr_free_table(struct mr6_table *mrt);
112
113static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
114 struct sk_buff *skb, struct mfc6_cache *cache);
115static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
116 mifi_t mifi, int assert);
117static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
118 struct mfc6_cache *c, struct rtmsg *rtm);
119static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
120 int cmd);
121static int ip6mr_rtm_dumproute(struct sk_buff *skb,
122 struct netlink_callback *cb);
123static void mroute_clean_tables(struct mr6_table *mrt);
124static void ipmr_expire_process(unsigned long arg);
125
126#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
127#define ip6mr_for_each_table(mrt, net) \
128 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
129
130static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
131{
132 struct mr6_table *mrt;
133
134 ip6mr_for_each_table(mrt, net) {
135 if (mrt->id == id)
136 return mrt;
137 }
138 return NULL;
139}
140
141static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
142 struct mr6_table **mrt)
143{
144 int err;
145 struct ip6mr_result res;
146 struct fib_lookup_arg arg = {
147 .result = &res,
148 .flags = FIB_LOOKUP_NOREF,
149 };
150
151 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
152 flowi6_to_flowi(flp6), 0, &arg);
153 if (err < 0)
154 return err;
155 *mrt = res.mrt;
156 return 0;
157}
158
159static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
160 int flags, struct fib_lookup_arg *arg)
161{
162 struct ip6mr_result *res = arg->result;
163 struct mr6_table *mrt;
164
165 switch (rule->action) {
166 case FR_ACT_TO_TBL:
167 break;
168 case FR_ACT_UNREACHABLE:
169 return -ENETUNREACH;
170 case FR_ACT_PROHIBIT:
171 return -EACCES;
172 case FR_ACT_BLACKHOLE:
173 default:
174 return -EINVAL;
175 }
176
177 mrt = ip6mr_get_table(rule->fr_net, rule->table);
178 if (mrt == NULL)
179 return -EAGAIN;
180 res->mrt = mrt;
181 return 0;
182}
183
184static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
185{
186 return 1;
187}
188
189static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
190 FRA_GENERIC_POLICY,
191};
192
193static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
194 struct fib_rule_hdr *frh, struct nlattr **tb)
195{
196 return 0;
197}
198
199static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
200 struct nlattr **tb)
201{
202 return 1;
203}
204
205static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
206 struct fib_rule_hdr *frh)
207{
208 frh->dst_len = 0;
209 frh->src_len = 0;
210 frh->tos = 0;
211 return 0;
212}
213
214static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
215 .family = RTNL_FAMILY_IP6MR,
216 .rule_size = sizeof(struct ip6mr_rule),
217 .addr_size = sizeof(struct in6_addr),
218 .action = ip6mr_rule_action,
219 .match = ip6mr_rule_match,
220 .configure = ip6mr_rule_configure,
221 .compare = ip6mr_rule_compare,
222 .default_pref = fib_default_rule_pref,
223 .fill = ip6mr_rule_fill,
224 .nlgroup = RTNLGRP_IPV6_RULE,
225 .policy = ip6mr_rule_policy,
226 .owner = THIS_MODULE,
227};
228
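/* Register the IP6MR fib rules ops for this netns, create the default table
 * and install the catch-all rule that points at it.
 */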
229static int __net_init ip6mr_rules_init(struct net *net)
230{
231 struct fib_rules_ops *ops;
232 struct mr6_table *mrt;
233 int err;
234
235 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
236 if (IS_ERR(ops))
237 return PTR_ERR(ops);
238
239 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
240
241 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
242 if (mrt == NULL) {
243 err = -ENOMEM;
244 goto err1;
245 }
246
247 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
248 if (err < 0)
249 goto err2;
250
251 net->ipv6.mr6_rules_ops = ops;
252 return 0;
253
254err2:
255 kfree(mrt);
256err1:
257 fib_rules_unregister(ops);
258 return err;
259}
260
261static void __net_exit ip6mr_rules_exit(struct net *net)
262{
263 struct mr6_table *mrt, *next;
264
265 rtnl_lock();
266 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
267 list_del(&mrt->list);
268 ip6mr_free_table(mrt);
269 }
270 rtnl_unlock();
271 fib_rules_unregister(net->ipv6.mr6_rules_ops);
272}
273#else
274#define ip6mr_for_each_table(mrt, net) \
275 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
276
277static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
278{
279 return net->ipv6.mrt6;
280}
281
282static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
283 struct mr6_table **mrt)
284{
285 *mrt = net->ipv6.mrt6;
286 return 0;
287}
288
289static int __net_init ip6mr_rules_init(struct net *net)
290{
291 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
292 return net->ipv6.mrt6 ? 0 : -ENOMEM;
293}
294
295static void __net_exit ip6mr_rules_exit(struct net *net)
296{
297 rtnl_lock();
298 ip6mr_free_table(net->ipv6.mrt6);
299 net->ipv6.mrt6 = NULL;
300 rtnl_unlock();
301}
302#endif
303
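/* Look up the table for @id, creating and initialising it (cache lists,
 * unresolved queue, expiry timer) if it does not exist yet.
 */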
304static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
305{
306 struct mr6_table *mrt;
307 unsigned int i;
308
309 mrt = ip6mr_get_table(net, id);
310 if (mrt != NULL)
311 return mrt;
312
313 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
314 if (mrt == NULL)
315 return NULL;
316 mrt->id = id;
317 write_pnet(&mrt->net, net);
318
319 /* Forwarding cache */
320 for (i = 0; i < MFC6_LINES; i++)
321 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
322
323 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
324
325 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
326 (unsigned long)mrt);
327
328#ifdef CONFIG_IPV6_PIMSM_V2
329 mrt->mroute_reg_vif_num = -1;
330#endif
331#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
332 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
333#endif
334 return mrt;
335}
336
337static void ip6mr_free_table(struct mr6_table *mrt)
338{
339 del_timer(&mrt->ipmr_expire_timer);
340 mroute_clean_tables(mrt);
341 kfree(mrt);
342}
343
344#ifdef CONFIG_PROC_FS
345
346struct ipmr_mfc_iter {
347 struct seq_net_private p;
348 struct mr6_table *mrt;
349 struct list_head *cache;
350 int ct;
351};
352
353
354static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
355 struct ipmr_mfc_iter *it, loff_t pos)
356{
357 struct mr6_table *mrt = it->mrt;
358 struct mfc6_cache *mfc;
359
360 read_lock(&mrt_lock);
361 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
362 it->cache = &mrt->mfc6_cache_array[it->ct];
363 list_for_each_entry(mfc, it->cache, list)
364 if (pos-- == 0)
365 return mfc;
366 }
367 read_unlock(&mrt_lock);
368
369 spin_lock_bh(&mfc_unres_lock);
370 it->cache = &mrt->mfc6_unres_queue;
371 list_for_each_entry(mfc, it->cache, list)
372 if (pos-- == 0)
373 return mfc;
374 spin_unlock_bh(&mfc_unres_lock);
375
376 it->cache = NULL;
377 return NULL;
378}
379
380/*
381 * The /proc interfaces to multicast routing: /proc/ip6_mr_cache and /proc/ip6_mr_vif
382 */
383
384struct ipmr_vif_iter {
385 struct seq_net_private p;
386 struct mr6_table *mrt;
387 int ct;
388};
389
390static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
391 struct ipmr_vif_iter *iter,
392 loff_t pos)
393{
394 struct mr6_table *mrt = iter->mrt;
395
396 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
397 if (!MIF_EXISTS(mrt, iter->ct))
398 continue;
399 if (pos-- == 0)
400 return &mrt->vif6_table[iter->ct];
401 }
402 return NULL;
403}
404
405static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
406 __acquires(mrt_lock)
407{
408 struct ipmr_vif_iter *iter = seq->private;
409 struct net *net = seq_file_net(seq);
410 struct mr6_table *mrt;
411
412 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
413 if (mrt == NULL)
414 return ERR_PTR(-ENOENT);
415
416 iter->mrt = mrt;
417
418 read_lock(&mrt_lock);
419 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
420 : SEQ_START_TOKEN;
421}
422
423static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
424{
425 struct ipmr_vif_iter *iter = seq->private;
426 struct net *net = seq_file_net(seq);
427 struct mr6_table *mrt = iter->mrt;
428
429 ++*pos;
430 if (v == SEQ_START_TOKEN)
431 return ip6mr_vif_seq_idx(net, iter, 0);
432
433 while (++iter->ct < mrt->maxvif) {
434 if (!MIF_EXISTS(mrt, iter->ct))
435 continue;
436 return &mrt->vif6_table[iter->ct];
437 }
438 return NULL;
439}
440
441static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
442 __releases(mrt_lock)
443{
444 read_unlock(&mrt_lock);
445}
446
447static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
448{
449 struct ipmr_vif_iter *iter = seq->private;
450 struct mr6_table *mrt = iter->mrt;
451
452 if (v == SEQ_START_TOKEN) {
453 seq_puts(seq,
454 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
455 } else {
456 const struct mif_device *vif = v;
457 const char *name = vif->dev ? vif->dev->name : "none";
458
459 seq_printf(seq,
460 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
461 vif - mrt->vif6_table,
462 name, vif->bytes_in, vif->pkt_in,
463 vif->bytes_out, vif->pkt_out,
464 vif->flags);
465 }
466 return 0;
467}
468
469static const struct seq_operations ip6mr_vif_seq_ops = {
470 .start = ip6mr_vif_seq_start,
471 .next = ip6mr_vif_seq_next,
472 .stop = ip6mr_vif_seq_stop,
473 .show = ip6mr_vif_seq_show,
474};
475
476static int ip6mr_vif_open(struct inode *inode, struct file *file)
477{
478 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
479 sizeof(struct ipmr_vif_iter));
480}
481
482static const struct file_operations ip6mr_vif_fops = {
483 .owner = THIS_MODULE,
484 .open = ip6mr_vif_open,
485 .read = seq_read,
486 .llseek = seq_lseek,
487 .release = seq_release_net,
488};
489
490static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
491{
492 struct ipmr_mfc_iter *it = seq->private;
493 struct net *net = seq_file_net(seq);
494 struct mr6_table *mrt;
495
496 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
497 if (mrt == NULL)
498 return ERR_PTR(-ENOENT);
499
500 it->mrt = mrt;
501 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
502 : SEQ_START_TOKEN;
503}
504
505static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
506{
507 struct mfc6_cache *mfc = v;
508 struct ipmr_mfc_iter *it = seq->private;
509 struct net *net = seq_file_net(seq);
510 struct mr6_table *mrt = it->mrt;
511
512 ++*pos;
513
514 if (v == SEQ_START_TOKEN)
515 return ipmr_mfc_seq_idx(net, seq->private, 0);
516
517 if (mfc->list.next != it->cache)
518 return list_entry(mfc->list.next, struct mfc6_cache, list);
519
520 if (it->cache == &mrt->mfc6_unres_queue)
521 goto end_of_list;
522
523 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
524
525 while (++it->ct < MFC6_LINES) {
526 it->cache = &mrt->mfc6_cache_array[it->ct];
527 if (list_empty(it->cache))
528 continue;
529 return list_first_entry(it->cache, struct mfc6_cache, list);
530 }
531
532 /* exhausted cache_array, show unresolved */
533 read_unlock(&mrt_lock);
534 it->cache = &mrt->mfc6_unres_queue;
535 it->ct = 0;
536
537 spin_lock_bh(&mfc_unres_lock);
538 if (!list_empty(it->cache))
539 return list_first_entry(it->cache, struct mfc6_cache, list);
540
541 end_of_list:
542 spin_unlock_bh(&mfc_unres_lock);
543 it->cache = NULL;
544
545 return NULL;
546}
547
548static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
549{
550 struct ipmr_mfc_iter *it = seq->private;
551 struct mr6_table *mrt = it->mrt;
552
553 if (it->cache == &mrt->mfc6_unres_queue)
554 spin_unlock_bh(&mfc_unres_lock);
555 else if (it->cache == mrt->mfc6_cache_array)
556 read_unlock(&mrt_lock);
557}
558
559static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
560{
561 int n;
562
563 if (v == SEQ_START_TOKEN) {
564 seq_puts(seq,
565 "Group "
566 "Origin "
567 "Iif Pkts Bytes Wrong Oifs\n");
568 } else {
569 const struct mfc6_cache *mfc = v;
570 const struct ipmr_mfc_iter *it = seq->private;
571 struct mr6_table *mrt = it->mrt;
572
573 seq_printf(seq, "%pI6 %pI6 %-3hd",
574 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
575 mfc->mf6c_parent);
576
577 if (it->cache != &mrt->mfc6_unres_queue) {
578 seq_printf(seq, " %8lu %8lu %8lu",
579 mfc->mfc_un.res.pkt,
580 mfc->mfc_un.res.bytes,
581 mfc->mfc_un.res.wrong_if);
582 for (n = mfc->mfc_un.res.minvif;
583 n < mfc->mfc_un.res.maxvif; n++) {
584 if (MIF_EXISTS(mrt, n) &&
585 mfc->mfc_un.res.ttls[n] < 255)
586 seq_printf(seq,
587 " %2d:%-3d",
588 n, mfc->mfc_un.res.ttls[n]);
589 }
590 } else {
591 /* unresolved mfc_caches don't contain
592 * pkt, bytes and wrong_if values
593 */
594 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
595 }
596 seq_putc(seq, '\n');
597 }
598 return 0;
599}
600
601static const struct seq_operations ipmr_mfc_seq_ops = {
602 .start = ipmr_mfc_seq_start,
603 .next = ipmr_mfc_seq_next,
604 .stop = ipmr_mfc_seq_stop,
605 .show = ipmr_mfc_seq_show,
606};
607
608static int ipmr_mfc_open(struct inode *inode, struct file *file)
609{
610 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
611 sizeof(struct ipmr_mfc_iter));
612}
613
614static const struct file_operations ip6mr_mfc_fops = {
615 .owner = THIS_MODULE,
616 .open = ipmr_mfc_open,
617 .read = seq_read,
618 .llseek = seq_lseek,
619 .release = seq_release_net,
620};
621#endif
622
623#ifdef CONFIG_IPV6_PIMSM_V2
624
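/* Handle a received PIM REGISTER packet: validate the header and checksum,
 * then decapsulate the inner multicast packet and re-inject it through the
 * pim6reg device.
 */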
625static int pim6_rcv(struct sk_buff *skb)
626{
627 struct pimreghdr *pim;
628 struct ipv6hdr *encap;
629 struct net_device *reg_dev = NULL;
630 struct net *net = dev_net(skb->dev);
631 struct mr6_table *mrt;
632 struct flowi6 fl6 = {
633 .flowi6_iif = skb->dev->ifindex,
634 .flowi6_mark = skb->mark,
635 };
636 int reg_vif_num;
637
638 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
639 goto drop;
640
641 pim = (struct pimreghdr *)skb_transport_header(skb);
642 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
643 (pim->flags & PIM_NULL_REGISTER) ||
644 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
645 sizeof(*pim), IPPROTO_PIM,
646 csum_partial((void *)pim, sizeof(*pim), 0)) &&
647 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
648 goto drop;
649
650 /* check if the inner packet is destined to mcast group */
651 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
652 sizeof(*pim));
653
654 if (!ipv6_addr_is_multicast(&encap->daddr) ||
655 encap->payload_len == 0 ||
656 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
657 goto drop;
658
659 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
660 goto drop;
661 reg_vif_num = mrt->mroute_reg_vif_num;
662
663 read_lock(&mrt_lock);
664 if (reg_vif_num >= 0)
665 reg_dev = mrt->vif6_table[reg_vif_num].dev;
666 if (reg_dev)
667 dev_hold(reg_dev);
668 read_unlock(&mrt_lock);
669
670 if (reg_dev == NULL)
671 goto drop;
672
673 skb->mac_header = skb->network_header;
674 skb_pull(skb, (u8 *)encap - skb->data);
675 skb_reset_network_header(skb);
676 skb->protocol = htons(ETH_P_IPV6);
677 skb->ip_summed = CHECKSUM_NONE;
678
679 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
680
681 netif_rx(skb);
682
683 dev_put(reg_dev);
684 return 0;
685 drop:
686 kfree_skb(skb);
687 return 0;
688}
689
690static const struct inet6_protocol pim6_protocol = {
691 .handler = pim6_rcv,
692};
693
694/* Service routines creating virtual interfaces: PIMREG */
695
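/* Packets transmitted on the pim6reg device are bounced to the daemon as
 * MRT6MSG_WHOLEPKT cache reports and then freed.
 */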
696static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
697 struct net_device *dev)
698{
699 struct net *net = dev_net(dev);
700 struct mr6_table *mrt;
701 struct flowi6 fl6 = {
702 .flowi6_oif = dev->ifindex,
703 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
704 .flowi6_mark = skb->mark,
705 };
706 int err;
707
708 err = ip6mr_fib_lookup(net, &fl6, &mrt);
709 if (err < 0) {
710 kfree_skb(skb);
711 return err;
712 }
713
714 read_lock(&mrt_lock);
715 dev->stats.tx_bytes += skb->len;
716 dev->stats.tx_packets++;
717 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
718 read_unlock(&mrt_lock);
719 kfree_skb(skb);
720 return NETDEV_TX_OK;
721}
722
723static const struct net_device_ops reg_vif_netdev_ops = {
724 .ndo_start_xmit = reg_vif_xmit,
725};
726
727static void reg_vif_setup(struct net_device *dev)
728{
729 dev->type = ARPHRD_PIMREG;
730 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
731 dev->flags = IFF_NOARP;
732 dev->netdev_ops = &reg_vif_netdev_ops;
733 dev->destructor = free_netdev;
734 dev->features |= NETIF_F_NETNS_LOCAL;
735}
736
737static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
738{
739 struct net_device *dev;
740 char name[IFNAMSIZ];
741
742 if (mrt->id == RT6_TABLE_DFLT)
743 sprintf(name, "pim6reg");
744 else
745 sprintf(name, "pim6reg%u", mrt->id);
746
747 dev = alloc_netdev(0, name, reg_vif_setup);
748 if (dev == NULL)
749 return NULL;
750
751 dev_net_set(dev, net);
752
753 if (register_netdevice(dev)) {
754 free_netdev(dev);
755 return NULL;
756 }
757 dev->iflink = 0;
758
759 if (dev_open(dev))
760 goto failure;
761
762 dev_hold(dev);
763 return dev;
764
765failure:
766 /* allow the register to be completed before unregistering. */
767 rtnl_unlock();
768 rtnl_lock();
769
770 unregister_netdevice(dev);
771 return NULL;
772}
773#endif
774
775/*
776 * Delete a VIF entry
777 */
778
779static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
780{
781 struct mif_device *v;
782 struct net_device *dev;
783 struct inet6_dev *in6_dev;
784
785 if (vifi < 0 || vifi >= mrt->maxvif)
786 return -EADDRNOTAVAIL;
787
788 v = &mrt->vif6_table[vifi];
789
790 write_lock_bh(&mrt_lock);
791 dev = v->dev;
792 v->dev = NULL;
793
794 if (!dev) {
795 write_unlock_bh(&mrt_lock);
796 return -EADDRNOTAVAIL;
797 }
798
799#ifdef CONFIG_IPV6_PIMSM_V2
800 if (vifi == mrt->mroute_reg_vif_num)
801 mrt->mroute_reg_vif_num = -1;
802#endif
803
804 if (vifi + 1 == mrt->maxvif) {
805 int tmp;
806 for (tmp = vifi - 1; tmp >= 0; tmp--) {
807 if (MIF_EXISTS(mrt, tmp))
808 break;
809 }
810 mrt->maxvif = tmp + 1;
811 }
812
813 write_unlock_bh(&mrt_lock);
814
815 dev_set_allmulti(dev, -1);
816
817 in6_dev = __in6_dev_get(dev);
818 if (in6_dev) {
819 in6_dev->cnf.mc_forwarding--;
820 inet6_netconf_notify_devconf(dev_net(dev),
821 NETCONFA_MC_FORWARDING,
822 dev->ifindex, &in6_dev->cnf);
823 }
824
825 if (v->flags & MIFF_REGISTER)
826 unregister_netdevice_queue(dev, head);
827
828 dev_put(dev);
829 return 0;
830}
831
832static inline void ip6mr_cache_free(struct mfc6_cache *c)
833{
834 kmem_cache_free(mrt_cachep, c);
835}
836
837/* Destroy an unresolved cache entry, killing queued skbs
838 and reporting error to netlink readers.
839 */
840
841static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
842{
843 struct net *net = read_pnet(&mrt->net);
844 struct sk_buff *skb;
845
846 atomic_dec(&mrt->cache_resolve_queue_len);
847
848 while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
849 if (ipv6_hdr(skb)->version == 0) {
850 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
851 nlh->nlmsg_type = NLMSG_ERROR;
852 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
853 skb_trim(skb, nlh->nlmsg_len);
854 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
855 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
856 } else
857 kfree_skb(skb);
858 }
859
860 ip6mr_cache_free(c);
861}
862
863
864/* Timer process for all the unresolved queue. */
865
866static void ipmr_do_expire_process(struct mr6_table *mrt)
867{
868 unsigned long now = jiffies;
869 unsigned long expires = 10 * HZ;
870 struct mfc6_cache *c, *next;
871
872 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
873 if (time_after(c->mfc_un.unres.expires, now)) {
874 /* not yet... */
875 unsigned long interval = c->mfc_un.unres.expires - now;
876 if (interval < expires)
877 expires = interval;
878 continue;
879 }
880
881 list_del(&c->list);
882 mr6_netlink_event(mrt, c, RTM_DELROUTE);
883 ip6mr_destroy_unres(mrt, c);
884 }
885
886 if (!list_empty(&mrt->mfc6_unres_queue))
887 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
888}
889
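/* Timer callback: run the expiry scan, retrying shortly if the unresolved
 * queue lock is currently contended.
 */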
890static void ipmr_expire_process(unsigned long arg)
891{
892 struct mr6_table *mrt = (struct mr6_table *)arg;
893
894 if (!spin_trylock(&mfc_unres_lock)) {
895 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
896 return;
897 }
898
899 if (!list_empty(&mrt->mfc6_unres_queue))
900 ipmr_do_expire_process(mrt);
901
902 spin_unlock(&mfc_unres_lock);
903}
904
905/* Fill oifs list. It is called under write locked mrt_lock. */
906
907static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
908 unsigned char *ttls)
909{
910 int vifi;
911
912 cache->mfc_un.res.minvif = MAXMIFS;
913 cache->mfc_un.res.maxvif = 0;
914 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
915
916 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
917 if (MIF_EXISTS(mrt, vifi) &&
918 ttls[vifi] && ttls[vifi] < 255) {
919 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
920 if (cache->mfc_un.res.minvif > vifi)
921 cache->mfc_un.res.minvif = vifi;
922 if (cache->mfc_un.res.maxvif <= vifi)
923 cache->mfc_un.res.maxvif = vifi + 1;
924 }
925 }
926}
927
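/* Add a virtual interface: put the device into allmulti mode, bump
 * mc_forwarding and publish the vif6_table slot under mrt_lock.
 */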
928static int mif6_add(struct net *net, struct mr6_table *mrt,
929 struct mif6ctl *vifc, int mrtsock)
930{
931 int vifi = vifc->mif6c_mifi;
932 struct mif_device *v = &mrt->vif6_table[vifi];
933 struct net_device *dev;
934 struct inet6_dev *in6_dev;
935 int err;
936
937 /* Is vif busy ? */
938 if (MIF_EXISTS(mrt, vifi))
939 return -EADDRINUSE;
940
941 switch (vifc->mif6c_flags) {
942#ifdef CONFIG_IPV6_PIMSM_V2
943 case MIFF_REGISTER:
944 /*
945 * Special Purpose VIF in PIM
946 * All the packets will be sent to the daemon
947 */
948 if (mrt->mroute_reg_vif_num >= 0)
949 return -EADDRINUSE;
950 dev = ip6mr_reg_vif(net, mrt);
951 if (!dev)
952 return -ENOBUFS;
953 err = dev_set_allmulti(dev, 1);
954 if (err) {
955 unregister_netdevice(dev);
956 dev_put(dev);
957 return err;
958 }
959 break;
960#endif
961 case 0:
962 dev = dev_get_by_index(net, vifc->mif6c_pifi);
963 if (!dev)
964 return -EADDRNOTAVAIL;
965 err = dev_set_allmulti(dev, 1);
966 if (err) {
967 dev_put(dev);
968 return err;
969 }
970 break;
971 default:
972 return -EINVAL;
973 }
974
975 in6_dev = __in6_dev_get(dev);
976 if (in6_dev) {
977 in6_dev->cnf.mc_forwarding++;
978 inet6_netconf_notify_devconf(dev_net(dev),
979 NETCONFA_MC_FORWARDING,
980 dev->ifindex, &in6_dev->cnf);
981 }
982
983 /*
984 * Fill in the VIF structures
985 */
986 v->rate_limit = vifc->vifc_rate_limit;
987 v->flags = vifc->mif6c_flags;
988 if (!mrtsock)
989 v->flags |= VIFF_STATIC;
990 v->threshold = vifc->vifc_threshold;
991 v->bytes_in = 0;
992 v->bytes_out = 0;
993 v->pkt_in = 0;
994 v->pkt_out = 0;
995 v->link = dev->ifindex;
996 if (v->flags & MIFF_REGISTER)
997 v->link = dev->iflink;
998
999 /* And finish update writing critical data */
1000 write_lock_bh(&mrt_lock);
1001 v->dev = dev;
1002#ifdef CONFIG_IPV6_PIMSM_V2
1003 if (v->flags & MIFF_REGISTER)
1004 mrt->mroute_reg_vif_num = vifi;
1005#endif
1006 if (vifi + 1 > mrt->maxvif)
1007 mrt->maxvif = vifi + 1;
1008 write_unlock_bh(&mrt_lock);
1009 return 0;
1010}
1011
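/* Look for an exact (S,G) entry */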
1012static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1013 const struct in6_addr *origin,
1014 const struct in6_addr *mcastgrp)
1015{
1016 int line = MFC6_HASH(mcastgrp, origin);
1017 struct mfc6_cache *c;
1018
1019 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1020 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1021 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1022 return c;
1023 }
1024 return NULL;
1025}
1026
1027/* Look for a (*,*,oif) entry */
1028static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1029 mifi_t mifi)
1030{
1031 int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1032 struct mfc6_cache *c;
1033
1034 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1035 if (ipv6_addr_any(&c->mf6c_origin) &&
1036 ipv6_addr_any(&c->mf6c_mcastgrp) &&
1037 (c->mfc_un.res.ttls[mifi] < 255))
1038 return c;
1039
1040 return NULL;
1041}
1042
1043/* Look for a (*,G) entry */
1044static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1045 struct in6_addr *mcastgrp,
1046 mifi_t mifi)
1047{
1048 int line = MFC6_HASH(mcastgrp, &in6addr_any);
1049 struct mfc6_cache *c, *proxy;
1050
1051 if (ipv6_addr_any(mcastgrp))
1052 goto skip;
1053
1054 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1055 if (ipv6_addr_any(&c->mf6c_origin) &&
1056 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1057 if (c->mfc_un.res.ttls[mifi] < 255)
1058 return c;
1059
1060 /* It's ok if the mifi is part of the static tree */
1061 proxy = ip6mr_cache_find_any_parent(mrt,
1062 c->mf6c_parent);
1063 if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1064 return c;
1065 }
1066
1067skip:
1068 return ip6mr_cache_find_any_parent(mrt, mifi);
1069}
1070
1071/*
1072 * Allocate a multicast cache entry
1073 */
1074static struct mfc6_cache *ip6mr_cache_alloc(void)
1075{
1076 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1077 if (c == NULL)
1078 return NULL;
1079 c->mfc_un.res.minvif = MAXMIFS;
1080 return c;
1081}
1082
1083static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1084{
1085 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1086 if (c == NULL)
1087 return NULL;
1088 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1089 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1090 return c;
1091}
1092
1093/*
1094 * A cache entry has gone into a resolved state from queued
1095 */
1096
1097static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1098 struct mfc6_cache *uc, struct mfc6_cache *c)
1099{
1100 struct sk_buff *skb;
1101
1102 /*
1103 * Play the pending entries through our router
1104 */
1105
1106 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1107 if (ipv6_hdr(skb)->version == 0) {
1108 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1109
1110 if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1111 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1112 } else {
1113 nlh->nlmsg_type = NLMSG_ERROR;
1114 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1115 skb_trim(skb, nlh->nlmsg_len);
1116 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1117 }
1118 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1119 } else
1120 ip6_mr_forward(net, mrt, skb, c);
1121 }
1122}
1123
1124/*
1125 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1126 * expects the following bizarre scheme.
1127 *
1128 * Called under mrt_lock.
1129 */
1130
1131static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1132 mifi_t mifi, int assert)
1133{
1134 struct sk_buff *skb;
1135 struct mrt6msg *msg;
1136 int ret;
1137
1138#ifdef CONFIG_IPV6_PIMSM_V2
1139 if (assert == MRT6MSG_WHOLEPKT)
1140 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1141 +sizeof(*msg));
1142 else
1143#endif
1144 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1145
1146 if (!skb)
1147 return -ENOBUFS;
1148
1149 /* I suppose that internal messages
1150 * do not require checksums */
1151
1152 skb->ip_summed = CHECKSUM_UNNECESSARY;
1153
1154#ifdef CONFIG_IPV6_PIMSM_V2
1155 if (assert == MRT6MSG_WHOLEPKT) {
1156 /* Ugly, but we have no choice with this interface.
1157 Duplicate old header, fix length etc.
1158 And all this only to mangle msg->im6_msgtype and
1159 to set msg->im6_mbz to "mbz" :-)
1160 */
1161 skb_push(skb, -skb_network_offset(pkt));
1162
1163 skb_push(skb, sizeof(*msg));
1164 skb_reset_transport_header(skb);
1165 msg = (struct mrt6msg *)skb_transport_header(skb);
1166 msg->im6_mbz = 0;
1167 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1168 msg->im6_mif = mrt->mroute_reg_vif_num;
1169 msg->im6_pad = 0;
1170 msg->im6_src = ipv6_hdr(pkt)->saddr;
1171 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1172
1173 skb->ip_summed = CHECKSUM_UNNECESSARY;
1174 } else
1175#endif
1176 {
1177 /*
1178 * Copy the IP header
1179 */
1180
1181 skb_put(skb, sizeof(struct ipv6hdr));
1182 skb_reset_network_header(skb);
1183 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1184
1185 /*
1186 * Add our header
1187 */
1188 skb_put(skb, sizeof(*msg));
1189 skb_reset_transport_header(skb);
1190 msg = (struct mrt6msg *)skb_transport_header(skb);
1191
1192 msg->im6_mbz = 0;
1193 msg->im6_msgtype = assert;
1194 msg->im6_mif = mifi;
1195 msg->im6_pad = 0;
1196 msg->im6_src = ipv6_hdr(pkt)->saddr;
1197 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1198
1199 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1200 skb->ip_summed = CHECKSUM_UNNECESSARY;
1201 }
1202
1203 if (mrt->mroute6_sk == NULL) {
1204 kfree_skb(skb);
1205 return -EINVAL;
1206 }
1207
1208 /*
1209 * Deliver to user space multicast routing algorithms
1210 */
1211 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1212 if (ret < 0) {
1213 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1214 kfree_skb(skb);
1215 }
1216
1217 return ret;
1218}
1219
1220/*
1221 * Queue a packet for resolution; the unresolved cache entry is handled under mfc_unres_lock.
1222 */
1223
1224static int
1225ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1226{
1227 bool found = false;
1228 int err;
1229 struct mfc6_cache *c;
1230
1231 spin_lock_bh(&mfc_unres_lock);
1232 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1233 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1234 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1235 found = true;
1236 break;
1237 }
1238 }
1239
1240 if (!found) {
1241 /*
1242 * Create a new entry if allowable
1243 */
1244
1245 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1246 (c = ip6mr_cache_alloc_unres()) == NULL) {
1247 spin_unlock_bh(&mfc_unres_lock);
1248
1249 kfree_skb(skb);
1250 return -ENOBUFS;
1251 }
1252
1253 /*
1254 * Fill in the new cache entry
1255 */
1256 c->mf6c_parent = -1;
1257 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1258 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1259
1260 /*
1261 * Reflect first query at pim6sd
1262 */
1263 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1264 if (err < 0) {
1265 /* If the report failed throw the cache entry
1266 out - Brad Parker
1267 */
1268 spin_unlock_bh(&mfc_unres_lock);
1269
1270 ip6mr_cache_free(c);
1271 kfree_skb(skb);
1272 return err;
1273 }
1274
1275 atomic_inc(&mrt->cache_resolve_queue_len);
1276 list_add(&c->list, &mrt->mfc6_unres_queue);
1277 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1278
1279 ipmr_do_expire_process(mrt);
1280 }
1281
1282 /*
1283 * See if we can append the packet
1284 */
1285 if (c->mfc_un.unres.unresolved.qlen > 3) {
1286 kfree_skb(skb);
1287 err = -ENOBUFS;
1288 } else {
1289 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1290 err = 0;
1291 }
1292
1293 spin_unlock_bh(&mfc_unres_lock);
1294 return err;
1295}
1296
1297/*
1298 * MFC6 cache manipulation by user space
1299 */
1300
1301static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1302 int parent)
1303{
1304 int line;
1305 struct mfc6_cache *c, *next;
1306
1307 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1308
1309 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1310 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1311 ipv6_addr_equal(&c->mf6c_mcastgrp,
1312 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1313 (parent == -1 || parent == c->mf6c_parent)) {
1314 write_lock_bh(&mrt_lock);
1315 list_del(&c->list);
1316 write_unlock_bh(&mrt_lock);
1317
1318 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1319 ip6mr_cache_free(c);
1320 return 0;
1321 }
1322 }
1323 return -ENOENT;
1324}
1325
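/* netdevice notifier: on NETDEV_UNREGISTER, drop every MIF bound to the
 * disappearing device.
 */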
1326static int ip6mr_device_event(struct notifier_block *this,
1327 unsigned long event, void *ptr)
1328{
1329 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1330 struct net *net = dev_net(dev);
1331 struct mr6_table *mrt;
1332 struct mif_device *v;
1333 int ct;
1334 LIST_HEAD(list);
1335
1336 if (event != NETDEV_UNREGISTER)
1337 return NOTIFY_DONE;
1338
1339 ip6mr_for_each_table(mrt, net) {
1340 v = &mrt->vif6_table[0];
1341 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1342 if (v->dev == dev)
1343 mif6_delete(mrt, ct, &list);
1344 }
1345 }
1346 unregister_netdevice_many(&list);
1347
1348 return NOTIFY_DONE;
1349}
1350
1351static struct notifier_block ip6_mr_notifier = {
1352 .notifier_call = ip6mr_device_event
1353};
1354
1355/*
1356 * Setup for IP multicast routing
1357 */
1358
1359static int __net_init ip6mr_net_init(struct net *net)
1360{
1361 int err;
1362
1363 err = ip6mr_rules_init(net);
1364 if (err < 0)
1365 goto fail;
1366
1367#ifdef CONFIG_PROC_FS
1368 err = -ENOMEM;
1369 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1370 goto proc_vif_fail;
1371 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1372 goto proc_cache_fail;
1373#endif
1374
1375 return 0;
1376
1377#ifdef CONFIG_PROC_FS
1378proc_cache_fail:
1379 remove_proc_entry("ip6_mr_vif", net->proc_net);
1380proc_vif_fail:
1381 ip6mr_rules_exit(net);
1382#endif
1383fail:
1384 return err;
1385}
1386
1387static void __net_exit ip6mr_net_exit(struct net *net)
1388{
1389#ifdef CONFIG_PROC_FS
1390 remove_proc_entry("ip6_mr_cache", net->proc_net);
1391 remove_proc_entry("ip6_mr_vif", net->proc_net);
1392#endif
1393 ip6mr_rules_exit(net);
1394}
1395
1396static struct pernet_operations ip6mr_net_ops = {
1397 .init = ip6mr_net_init,
1398 .exit = ip6mr_net_exit,
1399};
1400
1401int __init ip6_mr_init(void)
1402{
1403 int err;
1404
1405 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1406 sizeof(struct mfc6_cache),
1407 0, SLAB_HWCACHE_ALIGN,
1408 NULL);
1409 if (!mrt_cachep)
1410 return -ENOMEM;
1411
1412 err = register_pernet_subsys(&ip6mr_net_ops);
1413 if (err)
1414 goto reg_pernet_fail;
1415
1416 err = register_netdevice_notifier(&ip6_mr_notifier);
1417 if (err)
1418 goto reg_notif_fail;
1419#ifdef CONFIG_IPV6_PIMSM_V2
1420 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1421 pr_err("%s: can't add PIM protocol\n", __func__);
1422 err = -EAGAIN;
1423 goto add_proto_fail;
1424 }
1425#endif
1426 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1427 ip6mr_rtm_dumproute, NULL);
1428 return 0;
1429#ifdef CONFIG_IPV6_PIMSM_V2
1430add_proto_fail:
1431 unregister_netdevice_notifier(&ip6_mr_notifier);
1432#endif
1433reg_notif_fail:
1434 unregister_pernet_subsys(&ip6mr_net_ops);
1435reg_pernet_fail:
1436 kmem_cache_destroy(mrt_cachep);
1437 return err;
1438}
1439
1440void ip6_mr_cleanup(void)
1441{
1442 unregister_netdevice_notifier(&ip6_mr_notifier);
1443 unregister_pernet_subsys(&ip6mr_net_ops);
1444 kmem_cache_destroy(mrt_cachep);
1445}
1446
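/* Add or update an MFC entry on behalf of user space, then replay and free
 * any matching entry waiting on the unresolved queue.
 */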
1447static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1448 struct mf6cctl *mfc, int mrtsock, int parent)
1449{
1450 bool found = false;
1451 int line;
1452 struct mfc6_cache *uc, *c;
1453 unsigned char ttls[MAXMIFS];
1454 int i;
1455
1456 if (mfc->mf6cc_parent >= MAXMIFS)
1457 return -ENFILE;
1458
1459 memset(ttls, 255, MAXMIFS);
1460 for (i = 0; i < MAXMIFS; i++) {
1461 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1462 ttls[i] = 1;
1463
1464 }
1465
1466 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1467
1468 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1469 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1470 ipv6_addr_equal(&c->mf6c_mcastgrp,
1471 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1472 (parent == -1 || parent == mfc->mf6cc_parent)) {
1473 found = true;
1474 break;
1475 }
1476 }
1477
1478 if (found) {
1479 write_lock_bh(&mrt_lock);
1480 c->mf6c_parent = mfc->mf6cc_parent;
1481 ip6mr_update_thresholds(mrt, c, ttls);
1482 if (!mrtsock)
1483 c->mfc_flags |= MFC_STATIC;
1484 write_unlock_bh(&mrt_lock);
1485 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1486 return 0;
1487 }
1488
1489 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1490 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1491 return -EINVAL;
1492
1493 c = ip6mr_cache_alloc();
1494 if (c == NULL)
1495 return -ENOMEM;
1496
1497 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1498 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1499 c->mf6c_parent = mfc->mf6cc_parent;
1500 ip6mr_update_thresholds(mrt, c, ttls);
1501 if (!mrtsock)
1502 c->mfc_flags |= MFC_STATIC;
1503
1504 write_lock_bh(&mrt_lock);
1505 list_add(&c->list, &mrt->mfc6_cache_array[line]);
1506 write_unlock_bh(&mrt_lock);
1507
1508 /*
1509 * Check to see if we resolved a queued list. If so we
1510 * need to send on the frames and tidy up.
1511 */
1512 found = false;
1513 spin_lock_bh(&mfc_unres_lock);
1514 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1515 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1516 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1517 list_del(&uc->list);
1518 atomic_dec(&mrt->cache_resolve_queue_len);
1519 found = true;
1520 break;
1521 }
1522 }
1523 if (list_empty(&mrt->mfc6_unres_queue))
1524 del_timer(&mrt->ipmr_expire_timer);
1525 spin_unlock_bh(&mfc_unres_lock);
1526
1527 if (found) {
1528 ip6mr_cache_resolve(net, mrt, uc, c);
1529 ip6mr_cache_free(uc);
1530 }
1531 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1532 return 0;
1533}
1534
1535/*
1536 * Close the multicast socket, and clear the vif tables etc
1537 */
1538
1539static void mroute_clean_tables(struct mr6_table *mrt)
1540{
1541 int i;
1542 LIST_HEAD(list);
1543 struct mfc6_cache *c, *next;
1544
1545 /*
1546 * Shut down all active vif entries
1547 */
1548 for (i = 0; i < mrt->maxvif; i++) {
1549 if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1550 mif6_delete(mrt, i, &list);
1551 }
1552 unregister_netdevice_many(&list);
1553
1554 /*
1555 * Wipe the cache
1556 */
1557 for (i = 0; i < MFC6_LINES; i++) {
1558 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1559 if (c->mfc_flags & MFC_STATIC)
1560 continue;
1561 write_lock_bh(&mrt_lock);
1562 list_del(&c->list);
1563 write_unlock_bh(&mrt_lock);
1564
1565 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1566 ip6mr_cache_free(c);
1567 }
1568 }
1569
1570 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1571 spin_lock_bh(&mfc_unres_lock);
1572 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1573 list_del(&c->list);
1574 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1575 ip6mr_destroy_unres(mrt, c);
1576 }
1577 spin_unlock_bh(&mfc_unres_lock);
1578 }
1579}
1580
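/* Attach @sk as the multicast routing daemon socket for this table
 * (MRT6_INIT); only one such socket is allowed per table.
 */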
1581static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1582{
1583 int err = 0;
1584 struct net *net = sock_net(sk);
1585
1586 rtnl_lock();
1587 write_lock_bh(&mrt_lock);
1588 if (likely(mrt->mroute6_sk == NULL)) {
1589 mrt->mroute6_sk = sk;
1590 net->ipv6.devconf_all->mc_forwarding++;
1591 inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1592 NETCONFA_IFINDEX_ALL,
1593 net->ipv6.devconf_all);
1594 }
1595 else
1596 err = -EADDRINUSE;
1597 write_unlock_bh(&mrt_lock);
1598
1599 rtnl_unlock();
1600
1601 return err;
1602}
1603
1604int ip6mr_sk_done(struct sock *sk)
1605{
1606 int err = -EACCES;
1607 struct net *net = sock_net(sk);
1608 struct mr6_table *mrt;
1609
1610 rtnl_lock();
1611 ip6mr_for_each_table(mrt, net) {
1612 if (sk == mrt->mroute6_sk) {
1613 write_lock_bh(&mrt_lock);
1614 mrt->mroute6_sk = NULL;
1615 net->ipv6.devconf_all->mc_forwarding--;
1616 inet6_netconf_notify_devconf(net,
1617 NETCONFA_MC_FORWARDING,
1618 NETCONFA_IFINDEX_ALL,
1619 net->ipv6.devconf_all);
1620 write_unlock_bh(&mrt_lock);
1621
1622 mroute_clean_tables(mrt);
1623 err = 0;
1624 break;
1625 }
1626 }
1627 rtnl_unlock();
1628
1629 return err;
1630}
1631
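/* Return the daemon socket of the table matching this skb, if any */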
1632struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1633{
1634 struct mr6_table *mrt;
1635 struct flowi6 fl6 = {
1636 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1637 .flowi6_oif = skb->dev->ifindex,
1638 .flowi6_mark = skb->mark,
1639 };
1640
1641 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1642 return NULL;
1643
1644 return mrt->mroute6_sk;
1645}
1646
1647/*
1648 * Socket options and virtual interface manipulation. The whole
1649 * virtual interface system is a complete heap, but unfortunately
1650 * that's how BSD mrouted happens to think. Maybe one day with a proper
1651 * MOSPF/PIM router set up we can clean this up.
1652 */
1653
1654int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1655{
1656 int ret, parent = 0;
1657 struct mif6ctl vif;
1658 struct mf6cctl mfc;
1659 mifi_t mifi;
1660 struct net *net = sock_net(sk);
1661 struct mr6_table *mrt;
1662
1663 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1664 if (mrt == NULL)
1665 return -ENOENT;
1666
1667 if (optname != MRT6_INIT) {
1668 if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1669 return -EACCES;
1670 }
1671
1672 switch (optname) {
1673 case MRT6_INIT:
1674 if (sk->sk_type != SOCK_RAW ||
1675 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1676 return -EOPNOTSUPP;
1677 if (optlen < sizeof(int))
1678 return -EINVAL;
1679
1680 return ip6mr_sk_init(mrt, sk);
1681
1682 case MRT6_DONE:
1683 return ip6mr_sk_done(sk);
1684
1685 case MRT6_ADD_MIF:
1686 if (optlen < sizeof(vif))
1687 return -EINVAL;
1688 if (copy_from_user(&vif, optval, sizeof(vif)))
1689 return -EFAULT;
1690 if (vif.mif6c_mifi >= MAXMIFS)
1691 return -ENFILE;
1692 rtnl_lock();
1693 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1694 rtnl_unlock();
1695 return ret;
1696
1697 case MRT6_DEL_MIF:
1698 if (optlen < sizeof(mifi_t))
1699 return -EINVAL;
1700 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1701 return -EFAULT;
1702 rtnl_lock();
1703 ret = mif6_delete(mrt, mifi, NULL);
1704 rtnl_unlock();
1705 return ret;
1706
1707 /*
1708 * Manipulate the forwarding caches. These live
1709 * in a sort of kernel/user symbiosis.
1710 */
1711 case MRT6_ADD_MFC:
1712 case MRT6_DEL_MFC:
1713 parent = -1;
1714 case MRT6_ADD_MFC_PROXY:
1715 case MRT6_DEL_MFC_PROXY:
1716 if (optlen < sizeof(mfc))
1717 return -EINVAL;
1718 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1719 return -EFAULT;
1720 if (parent == 0)
1721 parent = mfc.mf6cc_parent;
1722 rtnl_lock();
1723 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1724 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1725 else
1726 ret = ip6mr_mfc_add(net, mrt, &mfc,
1727 sk == mrt->mroute6_sk, parent);
1728 rtnl_unlock();
1729 return ret;
1730
1731 /*
1732 * Control PIM assert (activating PIM also activates asserts)
1733 */
1734 case MRT6_ASSERT:
1735 {
1736 int v;
1737
1738 if (optlen != sizeof(v))
1739 return -EINVAL;
1740 if (get_user(v, (int __user *)optval))
1741 return -EFAULT;
1742 mrt->mroute_do_assert = v;
1743 return 0;
1744 }
1745
1746#ifdef CONFIG_IPV6_PIMSM_V2
1747 case MRT6_PIM:
1748 {
1749 int v;
1750
1751 if (optlen != sizeof(v))
1752 return -EINVAL;
1753 if (get_user(v, (int __user *)optval))
1754 return -EFAULT;
1755 v = !!v;
1756 rtnl_lock();
1757 ret = 0;
1758 if (v != mrt->mroute_do_pim) {
1759 mrt->mroute_do_pim = v;
1760 mrt->mroute_do_assert = v;
1761 }
1762 rtnl_unlock();
1763 return ret;
1764 }
1765
1766#endif
1767#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1768 case MRT6_TABLE:
1769 {
1770 u32 v;
1771
1772 if (optlen != sizeof(u32))
1773 return -EINVAL;
1774 if (get_user(v, (u32 __user *)optval))
1775 return -EFAULT;
1776 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1777 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1778 return -EINVAL;
1779 if (sk == mrt->mroute6_sk)
1780 return -EBUSY;
1781
1782 rtnl_lock();
1783 ret = 0;
1784 if (!ip6mr_new_table(net, v))
1785 ret = -ENOMEM;
1786 raw6_sk(sk)->ip6mr_table = v;
1787 rtnl_unlock();
1788 return ret;
1789 }
1790#endif
1791 /*
1792 * Spurious command, or MRT6_VERSION which you cannot
1793 * set.
1794 */
1795 default:
1796 return -ENOPROTOOPT;
1797 }
1798}
1799
1800/*
1801 * Getsockopt support for the multicast routing system.
1802 */
1803
1804int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1805 int __user *optlen)
1806{
1807 int olr;
1808 int val;
1809 struct net *net = sock_net(sk);
1810 struct mr6_table *mrt;
1811
1812 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1813 if (mrt == NULL)
1814 return -ENOENT;
1815
1816 switch (optname) {
1817 case MRT6_VERSION:
1818 val = 0x0305;
1819 break;
1820#ifdef CONFIG_IPV6_PIMSM_V2
1821 case MRT6_PIM:
1822 val = mrt->mroute_do_pim;
1823 break;
1824#endif
1825 case MRT6_ASSERT:
1826 val = mrt->mroute_do_assert;
1827 break;
1828 default:
1829 return -ENOPROTOOPT;
1830 }
1831
1832 if (get_user(olr, optlen))
1833 return -EFAULT;
1834
1835 olr = min_t(int, olr, sizeof(int));
1836 if (olr < 0)
1837 return -EINVAL;
1838
1839 if (put_user(olr, optlen))
1840 return -EFAULT;
1841 if (copy_to_user(optval, &val, olr))
1842 return -EFAULT;
1843 return 0;
1844}
1845
1846/*
1847 * The IP multicast ioctl support routines.
1848 */
1849
1850int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1851{
1852 struct sioc_sg_req6 sr;
1853 struct sioc_mif_req6 vr;
1854 struct mif_device *vif;
1855 struct mfc6_cache *c;
1856 struct net *net = sock_net(sk);
1857 struct mr6_table *mrt;
1858
1859 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1860 if (mrt == NULL)
1861 return -ENOENT;
1862
1863 switch (cmd) {
1864 case SIOCGETMIFCNT_IN6:
1865 if (copy_from_user(&vr, arg, sizeof(vr)))
1866 return -EFAULT;
1867 if (vr.mifi >= mrt->maxvif)
1868 return -EINVAL;
1869 read_lock(&mrt_lock);
1870 vif = &mrt->vif6_table[vr.mifi];
1871 if (MIF_EXISTS(mrt, vr.mifi)) {
1872 vr.icount = vif->pkt_in;
1873 vr.ocount = vif->pkt_out;
1874 vr.ibytes = vif->bytes_in;
1875 vr.obytes = vif->bytes_out;
1876 read_unlock(&mrt_lock);
1877
1878 if (copy_to_user(arg, &vr, sizeof(vr)))
1879 return -EFAULT;
1880 return 0;
1881 }
1882 read_unlock(&mrt_lock);
1883 return -EADDRNOTAVAIL;
1884 case SIOCGETSGCNT_IN6:
1885 if (copy_from_user(&sr, arg, sizeof(sr)))
1886 return -EFAULT;
1887
1888 read_lock(&mrt_lock);
1889 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1890 if (c) {
1891 sr.pktcnt = c->mfc_un.res.pkt;
1892 sr.bytecnt = c->mfc_un.res.bytes;
1893 sr.wrong_if = c->mfc_un.res.wrong_if;
1894 read_unlock(&mrt_lock);
1895
1896 if (copy_to_user(arg, &sr, sizeof(sr)))
1897 return -EFAULT;
1898 return 0;
1899 }
1900 read_unlock(&mrt_lock);
1901 return -EADDRNOTAVAIL;
1902 default:
1903 return -ENOIOCTLCMD;
1904 }
1905}
1906
1907#ifdef CONFIG_COMPAT
1908struct compat_sioc_sg_req6 {
1909 struct sockaddr_in6 src;
1910 struct sockaddr_in6 grp;
1911 compat_ulong_t pktcnt;
1912 compat_ulong_t bytecnt;
1913 compat_ulong_t wrong_if;
1914};
1915
1916struct compat_sioc_mif_req6 {
1917 mifi_t mifi;
1918 compat_ulong_t icount;
1919 compat_ulong_t ocount;
1920 compat_ulong_t ibytes;
1921 compat_ulong_t obytes;
1922};
1923
1924int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1925{
1926 struct compat_sioc_sg_req6 sr;
1927 struct compat_sioc_mif_req6 vr;
1928 struct mif_device *vif;
1929 struct mfc6_cache *c;
1930 struct net *net = sock_net(sk);
1931 struct mr6_table *mrt;
1932
1933 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1934 if (mrt == NULL)
1935 return -ENOENT;
1936
1937 switch (cmd) {
1938 case SIOCGETMIFCNT_IN6:
1939 if (copy_from_user(&vr, arg, sizeof(vr)))
1940 return -EFAULT;
1941 if (vr.mifi >= mrt->maxvif)
1942 return -EINVAL;
1943 read_lock(&mrt_lock);
1944 vif = &mrt->vif6_table[vr.mifi];
1945 if (MIF_EXISTS(mrt, vr.mifi)) {
1946 vr.icount = vif->pkt_in;
1947 vr.ocount = vif->pkt_out;
1948 vr.ibytes = vif->bytes_in;
1949 vr.obytes = vif->bytes_out;
1950 read_unlock(&mrt_lock);
1951
1952 if (copy_to_user(arg, &vr, sizeof(vr)))
1953 return -EFAULT;
1954 return 0;
1955 }
1956 read_unlock(&mrt_lock);
1957 return -EADDRNOTAVAIL;
1958 case SIOCGETSGCNT_IN6:
1959 if (copy_from_user(&sr, arg, sizeof(sr)))
1960 return -EFAULT;
1961
1962 read_lock(&mrt_lock);
1963 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1964 if (c) {
1965 sr.pktcnt = c->mfc_un.res.pkt;
1966 sr.bytecnt = c->mfc_un.res.bytes;
1967 sr.wrong_if = c->mfc_un.res.wrong_if;
1968 read_unlock(&mrt_lock);
1969
1970 if (copy_to_user(arg, &sr, sizeof(sr)))
1971 return -EFAULT;
1972 return 0;
1973 }
1974 read_unlock(&mrt_lock);
1975 return -EADDRNOTAVAIL;
1976 default:
1977 return -ENOIOCTLCMD;
1978 }
1979}
1980#endif
1981
1982static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1983{
1984 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1985 IPSTATS_MIB_OUTFORWDATAGRAMS);
1986 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1987 IPSTATS_MIB_OUTOCTETS, skb->len);
1988 return dst_output(skb);
1989}
1990
1991/*
1992 * Processing handlers for ip6mr_forward
1993 */
1994
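/* Transmit one copy of the packet on vif @vifi; register vifs are reported
 * to the daemon instead of transmitting.
 */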
1995static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1996 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1997{
1998 struct ipv6hdr *ipv6h;
1999 struct mif_device *vif = &mrt->vif6_table[vifi];
2000 struct net_device *dev;
2001 struct dst_entry *dst;
2002 struct flowi6 fl6;
2003
2004 if (vif->dev == NULL)
2005 goto out_free;
2006
2007#ifdef CONFIG_IPV6_PIMSM_V2
2008 if (vif->flags & MIFF_REGISTER) {
2009 vif->pkt_out++;
2010 vif->bytes_out += skb->len;
2011 vif->dev->stats.tx_bytes += skb->len;
2012 vif->dev->stats.tx_packets++;
2013 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2014 goto out_free;
2015 }
2016#endif
2017
2018 ipv6h = ipv6_hdr(skb);
2019
2020 fl6 = (struct flowi6) {
2021 .flowi6_oif = vif->link,
2022 .daddr = ipv6h->daddr,
2023 };
2024
2025 dst = ip6_route_output(net, NULL, &fl6);
2026 if (dst->error) {
2027 dst_release(dst);
2028 goto out_free;
2029 }
2030
2031 skb_dst_drop(skb);
2032 skb_dst_set(skb, dst);
2033
2034 /*
2035 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
2036 * not only before forwarding, but after forwarding on all output
2037 * interfaces. Clearly, if the mrouter runs a multicast
2038 * program, that program should receive packets regardless of which
2039 * interface it joined on.
2040 * If we did not do this, the program would have to join on all
2041 * interfaces. On the other hand, a multihomed host (or router, but
2042 * not mrouter) cannot join on more than one interface - that would
2043 * result in receiving duplicate packets.
2044 */
2045 dev = vif->dev;
2046 skb->dev = dev;
2047 vif->pkt_out++;
2048 vif->bytes_out += skb->len;
2049
2050 /* We are about to write */
2051 /* XXX: extension headers? */
2052 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2053 goto out_free;
2054
2055 ipv6h = ipv6_hdr(skb);
2056 ipv6h->hop_limit--;
2057
2058 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2059
2060 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
2061 ip6mr_forward2_finish);
2062
2063out_free:
2064 kfree_skb(skb);
2065 return 0;
2066}
2067
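/* Map a net_device back to its MIF index, or -1 if it is not a configured vif */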
2068static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2069{
2070 int ct;
2071
2072 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2073 if (mrt->vif6_table[ct].dev == dev)
2074 break;
2075 }
2076 return ct;
2077}
2078
2079static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2080 struct sk_buff *skb, struct mfc6_cache *cache)
2081{
2082 int psend = -1;
2083 int vif, ct;
2084 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2085
2086 vif = cache->mf6c_parent;
2087 cache->mfc_un.res.pkt++;
2088 cache->mfc_un.res.bytes += skb->len;
2089
2090 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2091 struct mfc6_cache *cache_proxy;
2092
2093 /* For an (*,G) entry, we only check that the incoming
2094 * interface is part of the static tree.
2095 */
2096 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2097 if (cache_proxy &&
2098 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2099 goto forward;
2100 }
2101
2102 /*
2103 * Wrong interface: drop packet and (maybe) send PIM assert.
2104 */
2105 if (mrt->vif6_table[vif].dev != skb->dev) {
2106 cache->mfc_un.res.wrong_if++;
2107
2108 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2109 /* PIM-SM uses asserts when switching from RPT to SPT,
2110 so we cannot check that the packet arrived on an oif.
2111 It is bad, but otherwise we would need to move a pretty
2112 large chunk of pimd into the kernel. Ough... --ANK
2113 */
2114 (mrt->mroute_do_pim ||
2115 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2116 time_after(jiffies,
2117 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2118 cache->mfc_un.res.last_assert = jiffies;
2119 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2120 }
2121 goto dont_forward;
2122 }
2123
2124forward:
2125 mrt->vif6_table[vif].pkt_in++;
2126 mrt->vif6_table[vif].bytes_in += skb->len;
2127
2128 /*
2129 * Forward the frame
2130 */
2131 if (ipv6_addr_any(&cache->mf6c_origin) &&
2132 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2133 if (true_vifi >= 0 &&
2134 true_vifi != cache->mf6c_parent &&
2135 ipv6_hdr(skb)->hop_limit >
2136 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2137 /* It's an (*,*) entry and the packet is not coming from
2138 * the upstream: forward the packet to the upstream
2139 * only.
2140 */
2141 psend = cache->mf6c_parent;
2142 goto last_forward;
2143 }
2144 goto dont_forward;
2145 }
2146 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2147 /* For (*,G) entry, don't forward to the incoming interface */
2148 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2149 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2150 if (psend != -1) {
2151 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2152 if (skb2)
2153 ip6mr_forward2(net, mrt, skb2, cache, psend);
2154 }
2155 psend = ct;
2156 }
2157 }
2158last_forward:
2159 if (psend != -1) {
2160 ip6mr_forward2(net, mrt, skb, cache, psend);
2161 return;
2162 }
2163
2164dont_forward:
2165 kfree_skb(skb);
2166}
2167
2168
2169/*
2170 * Multicast packets for forwarding arrive here
2171 */
2172
2173int ip6_mr_input(struct sk_buff *skb)
2174{
2175 struct mfc6_cache *cache;
2176 struct net *net = dev_net(skb->dev);
2177 struct mr6_table *mrt;
2178 struct flowi6 fl6 = {
2179 .flowi6_iif = skb->dev->ifindex,
2180 .flowi6_mark = skb->mark,
2181 };
2182 int err;
2183
2184 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2185 if (err < 0) {
2186 kfree_skb(skb);
2187 return err;
2188 }
2189
2190 read_lock(&mrt_lock);
2191 cache = ip6mr_cache_find(mrt,
2192 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2193 if (cache == NULL) {
2194 int vif = ip6mr_find_vif(mrt, skb->dev);
2195
2196 if (vif >= 0)
2197 cache = ip6mr_cache_find_any(mrt,
2198 &ipv6_hdr(skb)->daddr,
2199 vif);
2200 }
2201
2202 /*
2203 * No usable cache entry
2204 */
2205 if (cache == NULL) {
2206 int vif;
2207
2208 vif = ip6mr_find_vif(mrt, skb->dev);
2209 if (vif >= 0) {
2210 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2211 read_unlock(&mrt_lock);
2212
2213 return err;
2214 }
2215 read_unlock(&mrt_lock);
2216 kfree_skb(skb);
2217 return -ENODEV;
2218 }
2219
2220 ip6_mr_forward(net, mrt, skb, cache);
2221
2222 read_unlock(&mrt_lock);
2223
2224 return 0;
2225}
2226
2227
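/* Fill the IIF, the oif list (RTA_MULTIPATH) and the traffic counters
 * (RTA_MFC_STATS) of a resolved cache entry into a route dump message.
 */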
2228static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2229 struct mfc6_cache *c, struct rtmsg *rtm)
2230{
2231 int ct;
2232 struct rtnexthop *nhp;
2233 struct nlattr *mp_attr;
2234 struct rta_mfc_stats mfcs;
2235
2236 /* If cache is unresolved, don't try to parse IIF and OIF */
2237 if (c->mf6c_parent >= MAXMIFS)
2238 return -ENOENT;
2239
2240 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2241 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2242 return -EMSGSIZE;
2243 mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2244 if (mp_attr == NULL)
2245 return -EMSGSIZE;
2246
2247 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2248 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2249 nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2250 if (nhp == NULL) {
2251 nla_nest_cancel(skb, mp_attr);
2252 return -EMSGSIZE;
2253 }
2254
2255 nhp->rtnh_flags = 0;
2256 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2257 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2258 nhp->rtnh_len = sizeof(*nhp);
2259 }
2260 }
2261
2262 nla_nest_end(skb, mp_attr);
2263
2264 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2265 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2266 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2267 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2268 return -EMSGSIZE;
2269
2270 rtm->rtm_type = RTN_MULTICAST;
2271 return 1;
2272}
2273
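/* Resolve the multicast route behind an RTM_GETROUTE request; without a
 * matching cache entry the query is queued as an unresolved entry unless
 * nowait was requested.
 */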
2274int ip6mr_get_route(struct net *net,
2275 struct sk_buff *skb, struct rtmsg *rtm, int nowait)
2276{
2277 int err;
2278 struct mr6_table *mrt;
2279 struct mfc6_cache *cache;
2280 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2281
2282 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2283 if (mrt == NULL)
2284 return -ENOENT;
2285
2286 read_lock(&mrt_lock);
2287 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2288 if (!cache && skb->dev) {
2289 int vif = ip6mr_find_vif(mrt, skb->dev);
2290
2291 if (vif >= 0)
2292 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2293 vif);
2294 }
2295
2296 if (!cache) {
2297 struct sk_buff *skb2;
2298 struct ipv6hdr *iph;
2299 struct net_device *dev;
2300 int vif;
2301
2302 if (nowait) {
2303 read_unlock(&mrt_lock);
2304 return -EAGAIN;
2305 }
2306
2307 dev = skb->dev;
2308 if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2309 read_unlock(&mrt_lock);
2310 return -ENODEV;
2311 }
2312
2313 /* really correct? */
2314 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2315 if (!skb2) {
2316 read_unlock(&mrt_lock);
2317 return -ENOMEM;
2318 }
2319
2320 skb_reset_transport_header(skb2);
2321
2322 skb_put(skb2, sizeof(struct ipv6hdr));
2323 skb_reset_network_header(skb2);
2324
2325 iph = ipv6_hdr(skb2);
2326 iph->version = 0;
2327 iph->priority = 0;
2328 iph->flow_lbl[0] = 0;
2329 iph->flow_lbl[1] = 0;
2330 iph->flow_lbl[2] = 0;
2331 iph->payload_len = 0;
2332 iph->nexthdr = IPPROTO_NONE;
2333 iph->hop_limit = 0;
2334 iph->saddr = rt->rt6i_src.addr;
2335 iph->daddr = rt->rt6i_dst.addr;
2336
2337 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2338 read_unlock(&mrt_lock);
2339
2340 return err;
2341 }
2342
2343 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2344 cache->mfc_flags |= MFC_NOTIFY;
2345
2346 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2347 read_unlock(&mrt_lock);
2348 return err;
2349}
2350
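/* Build one complete RTM_NEWROUTE/RTM_DELROUTE message for a cache entry. */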
2351static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2352 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2353 int flags)
2354{
2355 struct nlmsghdr *nlh;
2356 struct rtmsg *rtm;
2357 int err;
2358
2359 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2360 if (nlh == NULL)
2361 return -EMSGSIZE;
2362
2363 rtm = nlmsg_data(nlh);
2364 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2365 rtm->rtm_dst_len = 128;
2366 rtm->rtm_src_len = 128;
2367 rtm->rtm_tos = 0;
2368 rtm->rtm_table = mrt->id;
2369 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2370 goto nla_put_failure;
2371 rtm->rtm_type = RTN_MULTICAST;
2372 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2373 if (c->mfc_flags & MFC_STATIC)
2374 rtm->rtm_protocol = RTPROT_STATIC;
2375 else
2376 rtm->rtm_protocol = RTPROT_MROUTED;
2377 rtm->rtm_flags = 0;
2378
2379 if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
2380 nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
2381 goto nla_put_failure;
2382 err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2383 /* do not break the dump if cache is unresolved */
2384 if (err < 0 && err != -ENOENT)
2385 goto nla_put_failure;
2386
2387 return nlmsg_end(skb, nlh);
2388
2389nla_put_failure:
2390 nlmsg_cancel(skb, nlh);
2391 return -EMSGSIZE;
2392}
2393
2394static int mr6_msgsize(bool unresolved, int maxvif)
2395{
2396 size_t len =
2397 NLMSG_ALIGN(sizeof(struct rtmsg))
2398 + nla_total_size(4) /* RTA_TABLE */
2399 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2400 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2401 ;
2402
2403 if (!unresolved)
2404 len = len
2405 + nla_total_size(4) /* RTA_IIF */
2406 + nla_total_size(0) /* RTA_MULTIPATH */
2407 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2408 /* RTA_MFC_STATS */
2409 + nla_total_size(sizeof(struct rta_mfc_stats))
2410 ;
2411
2412 return len;
2413}
2414
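/* Notify RTNLGRP_IPV6_MROUTE listeners about an added or deleted entry. */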
2415static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2416 int cmd)
2417{
2418 struct net *net = read_pnet(&mrt->net);
2419 struct sk_buff *skb;
2420 int err = -ENOBUFS;
2421
2422 skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2423 GFP_ATOMIC);
2424 if (skb == NULL)
2425 goto errout;
2426
2427 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2428 if (err < 0)
2429 goto errout;
2430
2431 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2432 return;
2433
2434errout:
2435 kfree_skb(skb);
2436 if (err < 0)
2437 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2438}
2439
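/* Dump resolved and unresolved cache entries of all tables; the current
 * table, hash line and entry index are carried in cb->args[] between calls.
 */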
2440static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2441{
2442 struct net *net = sock_net(skb->sk);
2443 struct mr6_table *mrt;
2444 struct mfc6_cache *mfc;
2445 unsigned int t = 0, s_t;
2446 unsigned int h = 0, s_h;
2447 unsigned int e = 0, s_e;
2448
2449 s_t = cb->args[0];
2450 s_h = cb->args[1];
2451 s_e = cb->args[2];
2452
2453 read_lock(&mrt_lock);
2454 ip6mr_for_each_table(mrt, net) {
2455 if (t < s_t)
2456 goto next_table;
2457 if (t > s_t)
2458 s_h = 0;
2459 for (h = s_h; h < MFC6_LINES; h++) {
2460 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2461 if (e < s_e)
2462 goto next_entry;
2463 if (ip6mr_fill_mroute(mrt, skb,
2464 NETLINK_CB(cb->skb).portid,
2465 cb->nlh->nlmsg_seq,
2466 mfc, RTM_NEWROUTE,
2467 NLM_F_MULTI) < 0)
2468 goto done;
2469next_entry:
2470 e++;
2471 }
2472 e = s_e = 0;
2473 }
2474 spin_lock_bh(&mfc_unres_lock);
2475 list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2476 if (e < s_e)
2477 goto next_entry2;
2478 if (ip6mr_fill_mroute(mrt, skb,
2479 NETLINK_CB(cb->skb).portid,
2480 cb->nlh->nlmsg_seq,
2481 mfc, RTM_NEWROUTE,
2482 NLM_F_MULTI) < 0) {
2483 spin_unlock_bh(&mfc_unres_lock);
2484 goto done;
2485 }
2486next_entry2:
2487 e++;
2488 }
2489 spin_unlock_bh(&mfc_unres_lock);
2490 e = s_e = 0;
2491 s_h = 0;
2492next_table:
2493 t++;
2494 }
2495done:
2496 read_unlock(&mrt_lock);
2497
2498 cb->args[2] = e;
2499 cb->args[1] = h;
2500 cb->args[0] = t;
2501
2502 return skb->len;
2503}
18
19#include <linux/uaccess.h>
20#include <linux/types.h>
21#include <linux/sched.h>
22#include <linux/errno.h>
23#include <linux/mm.h>
24#include <linux/kernel.h>
25#include <linux/fcntl.h>
26#include <linux/stat.h>
27#include <linux/socket.h>
28#include <linux/inet.h>
29#include <linux/netdevice.h>
30#include <linux/inetdevice.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/init.h>
34#include <linux/compat.h>
35#include <net/protocol.h>
36#include <linux/skbuff.h>
37#include <net/raw.h>
38#include <linux/notifier.h>
39#include <linux/if_arp.h>
40#include <net/checksum.h>
41#include <net/netlink.h>
42#include <net/fib_rules.h>
43
44#include <net/ipv6.h>
45#include <net/ip6_route.h>
46#include <linux/mroute6.h>
47#include <linux/pim.h>
48#include <net/addrconf.h>
49#include <linux/netfilter_ipv6.h>
50#include <linux/export.h>
51#include <net/ip6_checksum.h>
52#include <linux/netconf.h>
53
54struct ip6mr_rule {
55 struct fib_rule common;
56};
57
58struct ip6mr_result {
59 struct mr_table *mrt;
60};
61
62/* Big lock, protecting vif table, mrt cache and mroute socket state.
63 Note that the changes are semaphored via rtnl_lock.
64 */
65
66static DEFINE_RWLOCK(mrt_lock);
67
68/* Multicast router control variables */
69
70/* Special spinlock for queue of unresolved entries */
71static DEFINE_SPINLOCK(mfc_unres_lock);
72
73/* We return to original Alan's scheme. Hash table of resolved
74 entries is changed only in process context and protected
75 with weak lock mrt_lock. Queue of unresolved entries is protected
76 with strong spinlock mfc_unres_lock.
77
78 In this case data path is free of exclusive locks at all.
79 */
80
81static struct kmem_cache *mrt_cachep __read_mostly;
82
83static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
84static void ip6mr_free_table(struct mr_table *mrt);
85
86static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
87 struct sk_buff *skb, struct mfc6_cache *cache);
88static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
89 mifi_t mifi, int assert);
90static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
91 int cmd);
92static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
93static int ip6mr_rtm_dumproute(struct sk_buff *skb,
94 struct netlink_callback *cb);
95static void mroute_clean_tables(struct mr_table *mrt, bool all);
96static void ipmr_expire_process(struct timer_list *t);
97
98#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
99#define ip6mr_for_each_table(mrt, net) \
100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
101
102static struct mr_table *ip6mr_mr_table_iter(struct net *net,
103 struct mr_table *mrt)
104{
105 struct mr_table *ret;
106
107 if (!mrt)
108 ret = list_entry_rcu(net->ipv6.mr6_tables.next,
109 struct mr_table, list);
110 else
111 ret = list_entry_rcu(mrt->list.next,
112 struct mr_table, list);
113
114 if (&ret->list == &net->ipv6.mr6_tables)
115 return NULL;
116 return ret;
117}
118
119static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
120{
121 struct mr_table *mrt;
122
123 ip6mr_for_each_table(mrt, net) {
124 if (mrt->id == id)
125 return mrt;
126 }
127 return NULL;
128}
129
130static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
131 struct mr_table **mrt)
132{
133 int err;
134 struct ip6mr_result res;
135 struct fib_lookup_arg arg = {
136 .result = &res,
137 .flags = FIB_LOOKUP_NOREF,
138 };
139
140 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
141 flowi6_to_flowi(flp6), 0, &arg);
142 if (err < 0)
143 return err;
144 *mrt = res.mrt;
145 return 0;
146}
147
148static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
149 int flags, struct fib_lookup_arg *arg)
150{
151 struct ip6mr_result *res = arg->result;
152 struct mr_table *mrt;
153
154 switch (rule->action) {
155 case FR_ACT_TO_TBL:
156 break;
157 case FR_ACT_UNREACHABLE:
158 return -ENETUNREACH;
159 case FR_ACT_PROHIBIT:
160 return -EACCES;
161 case FR_ACT_BLACKHOLE:
162 default:
163 return -EINVAL;
164 }
165
166 mrt = ip6mr_get_table(rule->fr_net, rule->table);
167 if (!mrt)
168 return -EAGAIN;
169 res->mrt = mrt;
170 return 0;
171}
172
173static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
174{
175 return 1;
176}
177
178static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
179 FRA_GENERIC_POLICY,
180};
181
182static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
183 struct fib_rule_hdr *frh, struct nlattr **tb)
184{
185 return 0;
186}
187
188static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
189 struct nlattr **tb)
190{
191 return 1;
192}
193
194static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
195 struct fib_rule_hdr *frh)
196{
197 frh->dst_len = 0;
198 frh->src_len = 0;
199 frh->tos = 0;
200 return 0;
201}
202
203static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
204 .family = RTNL_FAMILY_IP6MR,
205 .rule_size = sizeof(struct ip6mr_rule),
206 .addr_size = sizeof(struct in6_addr),
207 .action = ip6mr_rule_action,
208 .match = ip6mr_rule_match,
209 .configure = ip6mr_rule_configure,
210 .compare = ip6mr_rule_compare,
211 .fill = ip6mr_rule_fill,
212 .nlgroup = RTNLGRP_IPV6_RULE,
213 .policy = ip6mr_rule_policy,
214 .owner = THIS_MODULE,
215};
216
217static int __net_init ip6mr_rules_init(struct net *net)
218{
219 struct fib_rules_ops *ops;
220 struct mr_table *mrt;
221 int err;
222
223 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
224 if (IS_ERR(ops))
225 return PTR_ERR(ops);
226
227 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
228
229 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
230 if (!mrt) {
231 err = -ENOMEM;
232 goto err1;
233 }
234
235 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
236 if (err < 0)
237 goto err2;
238
239 net->ipv6.mr6_rules_ops = ops;
240 return 0;
241
242err2:
243 ip6mr_free_table(mrt);
244err1:
245 fib_rules_unregister(ops);
246 return err;
247}
248
249static void __net_exit ip6mr_rules_exit(struct net *net)
250{
251 struct mr_table *mrt, *next;
252
253 rtnl_lock();
254 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
255 list_del(&mrt->list);
256 ip6mr_free_table(mrt);
257 }
258 fib_rules_unregister(net->ipv6.mr6_rules_ops);
259 rtnl_unlock();
260}
261
262static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
263{
264 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
265}
266
267static unsigned int ip6mr_rules_seq_read(struct net *net)
268{
269 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
270}
271
272bool ip6mr_rule_default(const struct fib_rule *rule)
273{
274 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
275 rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
276}
277EXPORT_SYMBOL(ip6mr_rule_default);
278#else
279#define ip6mr_for_each_table(mrt, net) \
280 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
281
282static struct mr_table *ip6mr_mr_table_iter(struct net *net,
283 struct mr_table *mrt)
284{
285 if (!mrt)
286 return net->ipv6.mrt6;
287 return NULL;
288}
289
290static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
291{
292 return net->ipv6.mrt6;
293}
294
295static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
296 struct mr_table **mrt)
297{
298 *mrt = net->ipv6.mrt6;
299 return 0;
300}
301
302static int __net_init ip6mr_rules_init(struct net *net)
303{
304 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
305 return net->ipv6.mrt6 ? 0 : -ENOMEM;
306}
307
308static void __net_exit ip6mr_rules_exit(struct net *net)
309{
310 rtnl_lock();
311 ip6mr_free_table(net->ipv6.mrt6);
312 net->ipv6.mrt6 = NULL;
313 rtnl_unlock();
314}
315
316static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
317{
318 return 0;
319}
320
321static unsigned int ip6mr_rules_seq_read(struct net *net)
322{
323 return 0;
324}
325#endif
326
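/* rhashtable key compare: a match requires both the group and the origin
 * address to be equal (0 means match).
 */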
327static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
328 const void *ptr)
329{
330 const struct mfc6_cache_cmp_arg *cmparg = arg->key;
331 struct mfc6_cache *c = (struct mfc6_cache *)ptr;
332
333 return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
334 !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
335}
336
337static const struct rhashtable_params ip6mr_rht_params = {
338 .head_offset = offsetof(struct mr_mfc, mnode),
339 .key_offset = offsetof(struct mfc6_cache, cmparg),
340 .key_len = sizeof(struct mfc6_cache_cmp_arg),
341 .nelem_hint = 3,
342 .locks_mul = 1,
343 .obj_cmpfn = ip6mr_hash_cmp,
344 .automatic_shrinking = true,
345};
346
347static void ip6mr_new_table_set(struct mr_table *mrt,
348 struct net *net)
349{
350#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
351 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
352#endif
353}
354
355static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
356 .mf6c_origin = IN6ADDR_ANY_INIT,
357 .mf6c_mcastgrp = IN6ADDR_ANY_INIT,
358};
359
360static struct mr_table_ops ip6mr_mr_table_ops = {
361 .rht_params = &ip6mr_rht_params,
362 .cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
363};
364
365static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
366{
367 struct mr_table *mrt;
368
369 mrt = ip6mr_get_table(net, id);
370 if (mrt)
371 return mrt;
372
373 return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
374 ipmr_expire_process, ip6mr_new_table_set);
375}
376
377static void ip6mr_free_table(struct mr_table *mrt)
378{
379 del_timer_sync(&mrt->ipmr_expire_timer);
380 mroute_clean_tables(mrt, true);
381 rhltable_destroy(&mrt->mfc_hash);
382 kfree(mrt);
383}
384
385#ifdef CONFIG_PROC_FS
386/* The /proc interfaces to multicast routing
387 * /proc/ip6_mr_cache /proc/ip6_mr_vif
388 */
389
390static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
391 __acquires(mrt_lock)
392{
393 struct mr_vif_iter *iter = seq->private;
394 struct net *net = seq_file_net(seq);
395 struct mr_table *mrt;
396
397 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
398 if (!mrt)
399 return ERR_PTR(-ENOENT);
400
401 iter->mrt = mrt;
402
403 read_lock(&mrt_lock);
404 return mr_vif_seq_start(seq, pos);
405}
406
407static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
408 __releases(mrt_lock)
409{
410 read_unlock(&mrt_lock);
411}
412
413static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
414{
415 struct mr_vif_iter *iter = seq->private;
416 struct mr_table *mrt = iter->mrt;
417
418 if (v == SEQ_START_TOKEN) {
419 seq_puts(seq,
420 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
421 } else {
422 const struct vif_device *vif = v;
423 const char *name = vif->dev ? vif->dev->name : "none";
424
425 seq_printf(seq,
426 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
427 vif - mrt->vif_table,
428 name, vif->bytes_in, vif->pkt_in,
429 vif->bytes_out, vif->pkt_out,
430 vif->flags);
431 }
432 return 0;
433}
434
435static const struct seq_operations ip6mr_vif_seq_ops = {
436 .start = ip6mr_vif_seq_start,
437 .next = mr_vif_seq_next,
438 .stop = ip6mr_vif_seq_stop,
439 .show = ip6mr_vif_seq_show,
440};
441
442static int ip6mr_vif_open(struct inode *inode, struct file *file)
443{
444 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
445 sizeof(struct mr_vif_iter));
446}
447
448static const struct file_operations ip6mr_vif_fops = {
449 .open = ip6mr_vif_open,
450 .read = seq_read,
451 .llseek = seq_lseek,
452 .release = seq_release_net,
453};
454
455static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
456{
457 struct net *net = seq_file_net(seq);
458 struct mr_table *mrt;
459
460 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
461 if (!mrt)
462 return ERR_PTR(-ENOENT);
463
464 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
465}
466
467static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
468{
469 int n;
470
471 if (v == SEQ_START_TOKEN) {
472 seq_puts(seq,
473 "Group "
474 "Origin "
475 "Iif Pkts Bytes Wrong Oifs\n");
476 } else {
477 const struct mfc6_cache *mfc = v;
478 const struct mr_mfc_iter *it = seq->private;
479 struct mr_table *mrt = it->mrt;
480
481 seq_printf(seq, "%pI6 %pI6 %-3hd",
482 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
483 mfc->_c.mfc_parent);
484
485 if (it->cache != &mrt->mfc_unres_queue) {
486 seq_printf(seq, " %8lu %8lu %8lu",
487 mfc->_c.mfc_un.res.pkt,
488 mfc->_c.mfc_un.res.bytes,
489 mfc->_c.mfc_un.res.wrong_if);
490 for (n = mfc->_c.mfc_un.res.minvif;
491 n < mfc->_c.mfc_un.res.maxvif; n++) {
492 if (VIF_EXISTS(mrt, n) &&
493 mfc->_c.mfc_un.res.ttls[n] < 255)
494 seq_printf(seq,
495 " %2d:%-3d", n,
496 mfc->_c.mfc_un.res.ttls[n]);
497 }
498 } else {
499 /* unresolved mfc_caches don't contain
500 * pkt, bytes and wrong_if values
501 */
502 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
503 }
504 seq_putc(seq, '\n');
505 }
506 return 0;
507}
508
509static const struct seq_operations ipmr_mfc_seq_ops = {
510 .start = ipmr_mfc_seq_start,
511 .next = mr_mfc_seq_next,
512 .stop = mr_mfc_seq_stop,
513 .show = ipmr_mfc_seq_show,
514};
515
516static int ipmr_mfc_open(struct inode *inode, struct file *file)
517{
518 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
519 sizeof(struct mr_mfc_iter));
520}
521
522static const struct file_operations ip6mr_mfc_fops = {
523 .open = ipmr_mfc_open,
524 .read = seq_read,
525 .llseek = seq_lseek,
526 .release = seq_release_net,
527};
528#endif
529
530#ifdef CONFIG_IPV6_PIMSM_V2
531
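/* Handle a received PIM register message: validate type, flags and
 * checksum, then feed the decapsulated payload back through the
 * pim6reg device.
 */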
532static int pim6_rcv(struct sk_buff *skb)
533{
534 struct pimreghdr *pim;
535 struct ipv6hdr *encap;
536 struct net_device *reg_dev = NULL;
537 struct net *net = dev_net(skb->dev);
538 struct mr_table *mrt;
539 struct flowi6 fl6 = {
540 .flowi6_iif = skb->dev->ifindex,
541 .flowi6_mark = skb->mark,
542 };
543 int reg_vif_num;
544
545 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
546 goto drop;
547
548 pim = (struct pimreghdr *)skb_transport_header(skb);
549 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
550 (pim->flags & PIM_NULL_REGISTER) ||
551 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
552 sizeof(*pim), IPPROTO_PIM,
553 csum_partial((void *)pim, sizeof(*pim), 0)) &&
554 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
555 goto drop;
556
557 /* check if the inner packet is destined for a multicast group */
558 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
559 sizeof(*pim));
560
561 if (!ipv6_addr_is_multicast(&encap->daddr) ||
562 encap->payload_len == 0 ||
563 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
564 goto drop;
565
566 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
567 goto drop;
568 reg_vif_num = mrt->mroute_reg_vif_num;
569
570 read_lock(&mrt_lock);
571 if (reg_vif_num >= 0)
572 reg_dev = mrt->vif_table[reg_vif_num].dev;
573 if (reg_dev)
574 dev_hold(reg_dev);
575 read_unlock(&mrt_lock);
576
577 if (!reg_dev)
578 goto drop;
579
580 skb->mac_header = skb->network_header;
581 skb_pull(skb, (u8 *)encap - skb->data);
582 skb_reset_network_header(skb);
583 skb->protocol = htons(ETH_P_IPV6);
584 skb->ip_summed = CHECKSUM_NONE;
585
586 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
587
588 netif_rx(skb);
589
590 dev_put(reg_dev);
591 return 0;
592 drop:
593 kfree_skb(skb);
594 return 0;
595}
596
597static const struct inet6_protocol pim6_protocol = {
598 .handler = pim6_rcv,
599};
600
601/* Service routines creating virtual interfaces: PIMREG */
602
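/* Anything transmitted on the pim6reg device is reported to the daemon
 * as an MRT6MSG_WHOLEPKT message and then dropped.
 */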
603static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
604 struct net_device *dev)
605{
606 struct net *net = dev_net(dev);
607 struct mr_table *mrt;
608 struct flowi6 fl6 = {
609 .flowi6_oif = dev->ifindex,
610 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
611 .flowi6_mark = skb->mark,
612 };
613 int err;
614
615 err = ip6mr_fib_lookup(net, &fl6, &mrt);
616 if (err < 0) {
617 kfree_skb(skb);
618 return err;
619 }
620
621 read_lock(&mrt_lock);
622 dev->stats.tx_bytes += skb->len;
623 dev->stats.tx_packets++;
624 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
625 read_unlock(&mrt_lock);
626 kfree_skb(skb);
627 return NETDEV_TX_OK;
628}
629
630static int reg_vif_get_iflink(const struct net_device *dev)
631{
632 return 0;
633}
634
635static const struct net_device_ops reg_vif_netdev_ops = {
636 .ndo_start_xmit = reg_vif_xmit,
637 .ndo_get_iflink = reg_vif_get_iflink,
638};
639
640static void reg_vif_setup(struct net_device *dev)
641{
642 dev->type = ARPHRD_PIMREG;
643 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
644 dev->flags = IFF_NOARP;
645 dev->netdev_ops = &reg_vif_netdev_ops;
646 dev->needs_free_netdev = true;
647 dev->features |= NETIF_F_NETNS_LOCAL;
648}
649
650static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
651{
652 struct net_device *dev;
653 char name[IFNAMSIZ];
654
655 if (mrt->id == RT6_TABLE_DFLT)
656 sprintf(name, "pim6reg");
657 else
658 sprintf(name, "pim6reg%u", mrt->id);
659
660 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
661 if (!dev)
662 return NULL;
663
664 dev_net_set(dev, net);
665
666 if (register_netdevice(dev)) {
667 free_netdev(dev);
668 return NULL;
669 }
670
671 if (dev_open(dev))
672 goto failure;
673
674 dev_hold(dev);
675 return dev;
676
677failure:
678 unregister_netdevice(dev);
679 return NULL;
680}
681#endif
682
683static int call_ip6mr_vif_entry_notifiers(struct net *net,
684 enum fib_event_type event_type,
685 struct vif_device *vif,
686 mifi_t vif_index, u32 tb_id)
687{
688 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
689 vif, vif_index, tb_id,
690 &net->ipv6.ipmr_seq);
691}
692
693static int call_ip6mr_mfc_entry_notifiers(struct net *net,
694 enum fib_event_type event_type,
695 struct mfc6_cache *mfc, u32 tb_id)
696{
697 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
698 &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
699}
700
701/* Delete a VIF entry */
702static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
703 struct list_head *head)
704{
705 struct vif_device *v;
706 struct net_device *dev;
707 struct inet6_dev *in6_dev;
708
709 if (vifi < 0 || vifi >= mrt->maxvif)
710 return -EADDRNOTAVAIL;
711
712 v = &mrt->vif_table[vifi];
713
714 if (VIF_EXISTS(mrt, vifi))
715 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
716 FIB_EVENT_VIF_DEL, v, vifi,
717 mrt->id);
718
719 write_lock_bh(&mrt_lock);
720 dev = v->dev;
721 v->dev = NULL;
722
723 if (!dev) {
724 write_unlock_bh(&mrt_lock);
725 return -EADDRNOTAVAIL;
726 }
727
728#ifdef CONFIG_IPV6_PIMSM_V2
729 if (vifi == mrt->mroute_reg_vif_num)
730 mrt->mroute_reg_vif_num = -1;
731#endif
732
733 if (vifi + 1 == mrt->maxvif) {
734 int tmp;
735 for (tmp = vifi - 1; tmp >= 0; tmp--) {
736 if (VIF_EXISTS(mrt, tmp))
737 break;
738 }
739 mrt->maxvif = tmp + 1;
740 }
741
742 write_unlock_bh(&mrt_lock);
743
744 dev_set_allmulti(dev, -1);
745
746 in6_dev = __in6_dev_get(dev);
747 if (in6_dev) {
748 in6_dev->cnf.mc_forwarding--;
749 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
750 NETCONFA_MC_FORWARDING,
751 dev->ifindex, &in6_dev->cnf);
752 }
753
754 if ((v->flags & MIFF_REGISTER) && !notify)
755 unregister_netdevice_queue(dev, head);
756
757 dev_put(dev);
758 return 0;
759}
760
761static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
762{
763 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
764
765 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
766}
767
768static inline void ip6mr_cache_free(struct mfc6_cache *c)
769{
770 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
771}
772
773/* Destroy an unresolved cache entry, killing queued skbs
774 and reporting error to netlink readers.
775 */
776
777static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
778{
779 struct net *net = read_pnet(&mrt->net);
780 struct sk_buff *skb;
781
782 atomic_dec(&mrt->cache_resolve_queue_len);
783
784 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
785 if (ipv6_hdr(skb)->version == 0) {
786 struct nlmsghdr *nlh = skb_pull(skb,
787 sizeof(struct ipv6hdr));
788 nlh->nlmsg_type = NLMSG_ERROR;
789 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
790 skb_trim(skb, nlh->nlmsg_len);
791 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
792 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
793 } else
794 kfree_skb(skb);
795 }
796
797 ip6mr_cache_free(c);
798}
799
800
801/* Timer process for all the unresolved queue. */
802
803static void ipmr_do_expire_process(struct mr_table *mrt)
804{
805 unsigned long now = jiffies;
806 unsigned long expires = 10 * HZ;
807 struct mr_mfc *c, *next;
808
809 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
810 if (time_after(c->mfc_un.unres.expires, now)) {
811 /* not yet... */
812 unsigned long interval = c->mfc_un.unres.expires - now;
813 if (interval < expires)
814 expires = interval;
815 continue;
816 }
817
818 list_del(&c->list);
819 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
820 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
821 }
822
823 if (!list_empty(&mrt->mfc_unres_queue))
824 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
825}
826
827static void ipmr_expire_process(struct timer_list *t)
828{
829 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
830
831 if (!spin_trylock(&mfc_unres_lock)) {
832 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
833 return;
834 }
835
836 if (!list_empty(&mrt->mfc_unres_queue))
837 ipmr_do_expire_process(mrt);
838
839 spin_unlock(&mfc_unres_lock);
840}
841
842/* Fill the oif list. Called with mrt_lock held for writing. */
843
844static void ip6mr_update_thresholds(struct mr_table *mrt,
845 struct mr_mfc *cache,
846 unsigned char *ttls)
847{
848 int vifi;
849
850 cache->mfc_un.res.minvif = MAXMIFS;
851 cache->mfc_un.res.maxvif = 0;
852 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
853
854 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
855 if (VIF_EXISTS(mrt, vifi) &&
856 ttls[vifi] && ttls[vifi] < 255) {
857 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
858 if (cache->mfc_un.res.minvif > vifi)
859 cache->mfc_un.res.minvif = vifi;
860 if (cache->mfc_un.res.maxvif <= vifi)
861 cache->mfc_un.res.maxvif = vifi + 1;
862 }
863 }
864 cache->mfc_un.res.lastuse = jiffies;
865}
866
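/* Add a multicast interface: either a real device given by mif6c_pifi or,
 * for MIFF_REGISTER, the per-table pim6reg device.
 */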
867static int mif6_add(struct net *net, struct mr_table *mrt,
868 struct mif6ctl *vifc, int mrtsock)
869{
870 int vifi = vifc->mif6c_mifi;
871 struct vif_device *v = &mrt->vif_table[vifi];
872 struct net_device *dev;
873 struct inet6_dev *in6_dev;
874 int err;
875
876 /* Is vif busy ? */
877 if (VIF_EXISTS(mrt, vifi))
878 return -EADDRINUSE;
879
880 switch (vifc->mif6c_flags) {
881#ifdef CONFIG_IPV6_PIMSM_V2
882 case MIFF_REGISTER:
883 /*
884 * Special Purpose VIF in PIM
885 * All the packets will be sent to the daemon
886 */
887 if (mrt->mroute_reg_vif_num >= 0)
888 return -EADDRINUSE;
889 dev = ip6mr_reg_vif(net, mrt);
890 if (!dev)
891 return -ENOBUFS;
892 err = dev_set_allmulti(dev, 1);
893 if (err) {
894 unregister_netdevice(dev);
895 dev_put(dev);
896 return err;
897 }
898 break;
899#endif
900 case 0:
901 dev = dev_get_by_index(net, vifc->mif6c_pifi);
902 if (!dev)
903 return -EADDRNOTAVAIL;
904 err = dev_set_allmulti(dev, 1);
905 if (err) {
906 dev_put(dev);
907 return err;
908 }
909 break;
910 default:
911 return -EINVAL;
912 }
913
914 in6_dev = __in6_dev_get(dev);
915 if (in6_dev) {
916 in6_dev->cnf.mc_forwarding++;
917 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
918 NETCONFA_MC_FORWARDING,
919 dev->ifindex, &in6_dev->cnf);
920 }
921
922 /* Fill in the VIF structures */
923 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
924 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
925 MIFF_REGISTER);
926
927 /* And finish update writing critical data */
928 write_lock_bh(&mrt_lock);
929 v->dev = dev;
930#ifdef CONFIG_IPV6_PIMSM_V2
931 if (v->flags & MIFF_REGISTER)
932 mrt->mroute_reg_vif_num = vifi;
933#endif
934 if (vifi + 1 > mrt->maxvif)
935 mrt->maxvif = vifi + 1;
936 write_unlock_bh(&mrt_lock);
937 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
938 v, vifi, mrt->id);
939 return 0;
940}
941
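/* Look up an exact (S,G) entry */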
942static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
943 const struct in6_addr *origin,
944 const struct in6_addr *mcastgrp)
945{
946 struct mfc6_cache_cmp_arg arg = {
947 .mf6c_origin = *origin,
948 .mf6c_mcastgrp = *mcastgrp,
949 };
950
951 return mr_mfc_find(mrt, &arg);
952}
953
954/* Look for a (*,G) entry */
955static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
956 struct in6_addr *mcastgrp,
957 mifi_t mifi)
958{
959 struct mfc6_cache_cmp_arg arg = {
960 .mf6c_origin = in6addr_any,
961 .mf6c_mcastgrp = *mcastgrp,
962 };
963
964 if (ipv6_addr_any(mcastgrp))
965 return mr_mfc_find_any_parent(mrt, mifi);
966 return mr_mfc_find_any(mrt, mifi, &arg);
967}
968
969/* Look for a (S,G,iif) entry if parent != -1 */
970static struct mfc6_cache *
971ip6mr_cache_find_parent(struct mr_table *mrt,
972 const struct in6_addr *origin,
973 const struct in6_addr *mcastgrp,
974 int parent)
975{
976 struct mfc6_cache_cmp_arg arg = {
977 .mf6c_origin = *origin,
978 .mf6c_mcastgrp = *mcastgrp,
979 };
980
981 return mr_mfc_find_parent(mrt, &arg, parent);
982}
983
984/* Allocate a multicast cache entry */
985static struct mfc6_cache *ip6mr_cache_alloc(void)
986{
987 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
988 if (!c)
989 return NULL;
990 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
991 c->_c.mfc_un.res.minvif = MAXMIFS;
992 c->_c.free = ip6mr_cache_free_rcu;
993 refcount_set(&c->_c.mfc_un.res.refcount, 1);
994 return c;
995}
996
997static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
998{
999 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1000 if (!c)
1001 return NULL;
1002 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
1003 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
1004 return c;
1005}
1006
1007/*
1008 * A cache entry has gone into a resolved state from queued
1009 */
1010
1011static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1012 struct mfc6_cache *uc, struct mfc6_cache *c)
1013{
1014 struct sk_buff *skb;
1015
1016 /*
1017 * Play the pending entries through our router
1018 */
1019
1020 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1021 if (ipv6_hdr(skb)->version == 0) {
1022 struct nlmsghdr *nlh = skb_pull(skb,
1023 sizeof(struct ipv6hdr));
1024
1025 if (mr_fill_mroute(mrt, skb, &c->_c,
1026 nlmsg_data(nlh)) > 0) {
1027 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1028 } else {
1029 nlh->nlmsg_type = NLMSG_ERROR;
1030 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1031 skb_trim(skb, nlh->nlmsg_len);
1032 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1033 }
1034 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1035 } else
1036 ip6_mr_forward(net, mrt, skb, c);
1037 }
1038}
1039
1040/*
1041 * Bounce a cache query up to pim6sd and netlink.
1042 *
1043 * Called under mrt_lock.
1044 */
1045
1046static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
1047 mifi_t mifi, int assert)
1048{
1049 struct sock *mroute6_sk;
1050 struct sk_buff *skb;
1051 struct mrt6msg *msg;
1052 int ret;
1053
1054#ifdef CONFIG_IPV6_PIMSM_V2
1055 if (assert == MRT6MSG_WHOLEPKT)
1056 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1057 +sizeof(*msg));
1058 else
1059#endif
1060 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1061
1062 if (!skb)
1063 return -ENOBUFS;
1064
1065 /* I suppose that internal messages
1066 * do not require checksums */
1067
1068 skb->ip_summed = CHECKSUM_UNNECESSARY;
1069
1070#ifdef CONFIG_IPV6_PIMSM_V2
1071 if (assert == MRT6MSG_WHOLEPKT) {
1072 /* Ugly, but we have no choice with this interface.
1073 Duplicate old header, fix length etc.
1074 And all this only to mangle msg->im6_msgtype and
1075 to set msg->im6_mbz to "mbz" :-)
1076 */
1077 skb_push(skb, -skb_network_offset(pkt));
1078
1079 skb_push(skb, sizeof(*msg));
1080 skb_reset_transport_header(skb);
1081 msg = (struct mrt6msg *)skb_transport_header(skb);
1082 msg->im6_mbz = 0;
1083 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1084 msg->im6_mif = mrt->mroute_reg_vif_num;
1085 msg->im6_pad = 0;
1086 msg->im6_src = ipv6_hdr(pkt)->saddr;
1087 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1088
1089 skb->ip_summed = CHECKSUM_UNNECESSARY;
1090 } else
1091#endif
1092 {
1093 /*
1094 * Copy the IP header
1095 */
1096
1097 skb_put(skb, sizeof(struct ipv6hdr));
1098 skb_reset_network_header(skb);
1099 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1100
1101 /*
1102 * Add our header
1103 */
1104 skb_put(skb, sizeof(*msg));
1105 skb_reset_transport_header(skb);
1106 msg = (struct mrt6msg *)skb_transport_header(skb);
1107
1108 msg->im6_mbz = 0;
1109 msg->im6_msgtype = assert;
1110 msg->im6_mif = mifi;
1111 msg->im6_pad = 0;
1112 msg->im6_src = ipv6_hdr(pkt)->saddr;
1113 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1114
1115 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1117 }
1118
1119 rcu_read_lock();
1120 mroute6_sk = rcu_dereference(mrt->mroute_sk);
1121 if (!mroute6_sk) {
1122 rcu_read_unlock();
1123 kfree_skb(skb);
1124 return -EINVAL;
1125 }
1126
1127 mrt6msg_netlink_event(mrt, skb);
1128
1129 /* Deliver to user space multicast routing algorithms */
1130 ret = sock_queue_rcv_skb(mroute6_sk, skb);
1131 rcu_read_unlock();
1132 if (ret < 0) {
1133 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1134 kfree_skb(skb);
1135 }
1136
1137 return ret;
1138}
1139
1140/* Queue a packet for resolution; the unresolved cache entry is handled under mfc_unres_lock. */
1141static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1142 struct sk_buff *skb)
1143{
1144 struct mfc6_cache *c;
1145 bool found = false;
1146 int err;
1147
1148 spin_lock_bh(&mfc_unres_lock);
1149 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1150 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1151 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1152 found = true;
1153 break;
1154 }
1155 }
1156
1157 if (!found) {
1158 /*
1159 * Create a new entry if allowable
1160 */
1161
1162 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1163 (c = ip6mr_cache_alloc_unres()) == NULL) {
1164 spin_unlock_bh(&mfc_unres_lock);
1165
1166 kfree_skb(skb);
1167 return -ENOBUFS;
1168 }
1169
1170 /* Fill in the new cache entry */
1171 c->_c.mfc_parent = -1;
1172 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1173 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1174
1175 /*
1176 * Reflect first query at pim6sd
1177 */
1178 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1179 if (err < 0) {
1180 /* If the report failed throw the cache entry
1181 out - Brad Parker
1182 */
1183 spin_unlock_bh(&mfc_unres_lock);
1184
1185 ip6mr_cache_free(c);
1186 kfree_skb(skb);
1187 return err;
1188 }
1189
1190 atomic_inc(&mrt->cache_resolve_queue_len);
1191 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1192 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1193
1194 ipmr_do_expire_process(mrt);
1195 }
1196
1197 /* See if we can append the packet */
1198 if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1199 kfree_skb(skb);
1200 err = -ENOBUFS;
1201 } else {
1202 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1203 err = 0;
1204 }
1205
1206 spin_unlock_bh(&mfc_unres_lock);
1207 return err;
1208}
1209
1210/*
1211 * MFC6 cache manipulation by user space
1212 */
1213
1214static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1215 int parent)
1216{
1217 struct mfc6_cache *c;
1218
1219 /* The entries are added/deleted only under RTNL */
1220 rcu_read_lock();
1221 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1222 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1223 rcu_read_unlock();
1224 if (!c)
1225 return -ENOENT;
1226 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1227 list_del_rcu(&c->_c.list);
1228
1229 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1230 FIB_EVENT_ENTRY_DEL, c, mrt->id);
1231 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1232 mr_cache_put(&c->_c);
1233 return 0;
1234}
1235
1236static int ip6mr_device_event(struct notifier_block *this,
1237 unsigned long event, void *ptr)
1238{
1239 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1240 struct net *net = dev_net(dev);
1241 struct mr_table *mrt;
1242 struct vif_device *v;
1243 int ct;
1244
1245 if (event != NETDEV_UNREGISTER)
1246 return NOTIFY_DONE;
1247
1248 ip6mr_for_each_table(mrt, net) {
1249 v = &mrt->vif_table[0];
1250 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1251 if (v->dev == dev)
1252 mif6_delete(mrt, ct, 1, NULL);
1253 }
1254 }
1255
1256 return NOTIFY_DONE;
1257}
1258
1259static unsigned int ip6mr_seq_read(struct net *net)
1260{
1261 ASSERT_RTNL();
1262
1263 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1264}
1265
1266static int ip6mr_dump(struct net *net, struct notifier_block *nb)
1267{
1268 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1269 ip6mr_mr_table_iter, &mrt_lock);
1270}
1271
1272static struct notifier_block ip6_mr_notifier = {
1273 .notifier_call = ip6mr_device_event
1274};
1275
1276static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1277 .family = RTNL_FAMILY_IP6MR,
1278 .fib_seq_read = ip6mr_seq_read,
1279 .fib_dump = ip6mr_dump,
1280 .owner = THIS_MODULE,
1281};
1282
1283static int __net_init ip6mr_notifier_init(struct net *net)
1284{
1285 struct fib_notifier_ops *ops;
1286
1287 net->ipv6.ipmr_seq = 0;
1288
1289 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1290 if (IS_ERR(ops))
1291 return PTR_ERR(ops);
1292
1293 net->ipv6.ip6mr_notifier_ops = ops;
1294
1295 return 0;
1296}
1297
1298static void __net_exit ip6mr_notifier_exit(struct net *net)
1299{
1300 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1301 net->ipv6.ip6mr_notifier_ops = NULL;
1302}
1303
1304/* Setup for IP multicast routing */
1305static int __net_init ip6mr_net_init(struct net *net)
1306{
1307 int err;
1308
1309 err = ip6mr_notifier_init(net);
1310 if (err)
1311 return err;
1312
1313 err = ip6mr_rules_init(net);
1314 if (err < 0)
1315 goto ip6mr_rules_fail;
1316
1317#ifdef CONFIG_PROC_FS
1318 err = -ENOMEM;
1319 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1320 goto proc_vif_fail;
1321 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1322 goto proc_cache_fail;
1323#endif
1324
1325 return 0;
1326
1327#ifdef CONFIG_PROC_FS
1328proc_cache_fail:
1329 remove_proc_entry("ip6_mr_vif", net->proc_net);
1330proc_vif_fail:
1331 ip6mr_rules_exit(net);
1332#endif
1333ip6mr_rules_fail:
1334 ip6mr_notifier_exit(net);
1335 return err;
1336}
1337
1338static void __net_exit ip6mr_net_exit(struct net *net)
1339{
1340#ifdef CONFIG_PROC_FS
1341 remove_proc_entry("ip6_mr_cache", net->proc_net);
1342 remove_proc_entry("ip6_mr_vif", net->proc_net);
1343#endif
1344 ip6mr_rules_exit(net);
1345 ip6mr_notifier_exit(net);
1346}
1347
1348static struct pernet_operations ip6mr_net_ops = {
1349 .init = ip6mr_net_init,
1350 .exit = ip6mr_net_exit,
1351};
1352
1353int __init ip6_mr_init(void)
1354{
1355 int err;
1356
1357 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1358 sizeof(struct mfc6_cache),
1359 0, SLAB_HWCACHE_ALIGN,
1360 NULL);
1361 if (!mrt_cachep)
1362 return -ENOMEM;
1363
1364 err = register_pernet_subsys(&ip6mr_net_ops);
1365 if (err)
1366 goto reg_pernet_fail;
1367
1368 err = register_netdevice_notifier(&ip6_mr_notifier);
1369 if (err)
1370 goto reg_notif_fail;
1371#ifdef CONFIG_IPV6_PIMSM_V2
1372 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1373 pr_err("%s: can't add PIM protocol\n", __func__);
1374 err = -EAGAIN;
1375 goto add_proto_fail;
1376 }
1377#endif
1378 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1379 NULL, ip6mr_rtm_dumproute, 0);
1380 if (err == 0)
1381 return 0;
1382
1383#ifdef CONFIG_IPV6_PIMSM_V2
1384 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1385add_proto_fail:
1386 unregister_netdevice_notifier(&ip6_mr_notifier);
1387#endif
1388reg_notif_fail:
1389 unregister_pernet_subsys(&ip6mr_net_ops);
1390reg_pernet_fail:
1391 kmem_cache_destroy(mrt_cachep);
1392 return err;
1393}
1394
1395void ip6_mr_cleanup(void)
1396{
1397 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1398#ifdef CONFIG_IPV6_PIMSM_V2
1399 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1400#endif
1401 unregister_netdevice_notifier(&ip6_mr_notifier);
1402 unregister_pernet_subsys(&ip6mr_net_ops);
1403 kmem_cache_destroy(mrt_cachep);
1404}
1405
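/* Add or update an MFC entry on behalf of user space and, if it resolves a
 * queued entry, replay the packets that were waiting on it.
 */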
1406static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1407 struct mf6cctl *mfc, int mrtsock, int parent)
1408{
1409 unsigned char ttls[MAXMIFS];
1410 struct mfc6_cache *uc, *c;
1411 struct mr_mfc *_uc;
1412 bool found;
1413 int i, err;
1414
1415 if (mfc->mf6cc_parent >= MAXMIFS)
1416 return -ENFILE;
1417
1418 memset(ttls, 255, MAXMIFS);
1419 for (i = 0; i < MAXMIFS; i++) {
1420 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1421 ttls[i] = 1;
1422 }
1423
1424 /* The entries are added/deleted only under RTNL */
1425 rcu_read_lock();
1426 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1427 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1428 rcu_read_unlock();
1429 if (c) {
1430 write_lock_bh(&mrt_lock);
1431 c->_c.mfc_parent = mfc->mf6cc_parent;
1432 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1433 if (!mrtsock)
1434 c->_c.mfc_flags |= MFC_STATIC;
1435 write_unlock_bh(&mrt_lock);
1436 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1437 c, mrt->id);
1438 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1439 return 0;
1440 }
1441
1442 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1443 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1444 return -EINVAL;
1445
1446 c = ip6mr_cache_alloc();
1447 if (!c)
1448 return -ENOMEM;
1449
1450 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1451 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1452 c->_c.mfc_parent = mfc->mf6cc_parent;
1453 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1454 if (!mrtsock)
1455 c->_c.mfc_flags |= MFC_STATIC;
1456
1457 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1458 ip6mr_rht_params);
1459 if (err) {
1460 pr_err("ip6mr: rhtable insert error %d\n", err);
1461 ip6mr_cache_free(c);
1462 return err;
1463 }
1464 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1465
1466 /* Check to see if this resolves a queued (unresolved) entry. If so we
1467 * need to send the queued frames on and tidy up.
1468 */
1469 found = false;
1470 spin_lock_bh(&mfc_unres_lock);
1471 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1472 uc = (struct mfc6_cache *)_uc;
1473 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1474 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1475 list_del(&_uc->list);
1476 atomic_dec(&mrt->cache_resolve_queue_len);
1477 found = true;
1478 break;
1479 }
1480 }
1481 if (list_empty(&mrt->mfc_unres_queue))
1482 del_timer(&mrt->ipmr_expire_timer);
1483 spin_unlock_bh(&mfc_unres_lock);
1484
1485 if (found) {
1486 ip6mr_cache_resolve(net, mrt, uc, c);
1487 ip6mr_cache_free(uc);
1488 }
1489 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1490 c, mrt->id);
1491 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1492 return 0;
1493}
1494
1495/*
1496 * Close the multicast socket, and clear the vif tables etc
1497 */
1498
1499static void mroute_clean_tables(struct mr_table *mrt, bool all)
1500{
1501 struct mr_mfc *c, *tmp;
1502 LIST_HEAD(list);
1503 int i;
1504
1505 /* Shut down all active vif entries */
1506 for (i = 0; i < mrt->maxvif; i++) {
1507 if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1508 continue;
1509 mif6_delete(mrt, i, 0, &list);
1510 }
1511 unregister_netdevice_many(&list);
1512
1513 /* Wipe the cache */
1514 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1515 if (!all && (c->mfc_flags & MFC_STATIC))
1516 continue;
1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1518 list_del_rcu(&c->list);
1519 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1520 mr_cache_put(c);
1521 }
1522
1523 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1524 spin_lock_bh(&mfc_unres_lock);
1525 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1526 list_del(&c->list);
1527 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1528 FIB_EVENT_ENTRY_DEL,
1529 (struct mfc6_cache *)c,
1530 mrt->id);
1531 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1532 RTM_DELROUTE);
1533 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1534 }
1535 spin_unlock_bh(&mfc_unres_lock);
1536 }
1537}
1538
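/* Register @sk as the routing daemon socket of this table and bump the
 * all-devices mc_forwarding flag.
 */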
1539static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1540{
1541 int err = 0;
1542 struct net *net = sock_net(sk);
1543
1544 rtnl_lock();
1545 write_lock_bh(&mrt_lock);
1546 if (rtnl_dereference(mrt->mroute_sk)) {
1547 err = -EADDRINUSE;
1548 } else {
1549 rcu_assign_pointer(mrt->mroute_sk, sk);
1550 sock_set_flag(sk, SOCK_RCU_FREE);
1551 net->ipv6.devconf_all->mc_forwarding++;
1552 }
1553 write_unlock_bh(&mrt_lock);
1554
1555 if (!err)
1556 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1557 NETCONFA_MC_FORWARDING,
1558 NETCONFA_IFINDEX_ALL,
1559 net->ipv6.devconf_all);
1560 rtnl_unlock();
1561
1562 return err;
1563}
1564
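/* Detach the daemon socket and flush all non-static vifs and cache
 * entries of its table.
 */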
1565int ip6mr_sk_done(struct sock *sk)
1566{
1567 int err = -EACCES;
1568 struct net *net = sock_net(sk);
1569 struct mr_table *mrt;
1570
1571 if (sk->sk_type != SOCK_RAW ||
1572 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1573 return err;
1574
1575 rtnl_lock();
1576 ip6mr_for_each_table(mrt, net) {
1577 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1578 write_lock_bh(&mrt_lock);
1579 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1580 /* Note that mroute_sk had SOCK_RCU_FREE set,
1581 * so the RCU grace period before sk freeing
1582 * is guaranteed by sk_destruct()
1583 */
1584 net->ipv6.devconf_all->mc_forwarding--;
1585 write_unlock_bh(&mrt_lock);
1586 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1587 NETCONFA_MC_FORWARDING,
1588 NETCONFA_IFINDEX_ALL,
1589 net->ipv6.devconf_all);
1590
1591 mroute_clean_tables(mrt, false);
1592 err = 0;
1593 break;
1594 }
1595 }
1596 rtnl_unlock();
1597
1598 return err;
1599}
1600
1601bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1602{
1603 struct mr_table *mrt;
1604 struct flowi6 fl6 = {
1605 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1606 .flowi6_oif = skb->dev->ifindex,
1607 .flowi6_mark = skb->mark,
1608 };
1609
1610 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1611 return false;
1612
1613 return rcu_access_pointer(mrt->mroute_sk);
1614}
1615EXPORT_SYMBOL(mroute6_is_socket);
1616
1617/*
1618 * Socket options and virtual interface manipulation. The whole
1619 * virtual interface system is a complete heap, but unfortunately
1620 * that's how BSD mrouted happens to think. Maybe one day with a proper
1621 * MOSPF/PIM router set up we can clean this up.
1622 */
1623
1624int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1625{
1626 int ret, parent = 0;
1627 struct mif6ctl vif;
1628 struct mf6cctl mfc;
1629 mifi_t mifi;
1630 struct net *net = sock_net(sk);
1631 struct mr_table *mrt;
1632
1633 if (sk->sk_type != SOCK_RAW ||
1634 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1635 return -EOPNOTSUPP;
1636
1637 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1638 if (!mrt)
1639 return -ENOENT;
1640
1641 if (optname != MRT6_INIT) {
1642 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1643 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1644 return -EACCES;
1645 }
1646
1647 switch (optname) {
1648 case MRT6_INIT:
1649 if (optlen < sizeof(int))
1650 return -EINVAL;
1651
1652 return ip6mr_sk_init(mrt, sk);
1653
1654 case MRT6_DONE:
1655 return ip6mr_sk_done(sk);
1656
1657 case MRT6_ADD_MIF:
1658 if (optlen < sizeof(vif))
1659 return -EINVAL;
1660 if (copy_from_user(&vif, optval, sizeof(vif)))
1661 return -EFAULT;
1662 if (vif.mif6c_mifi >= MAXMIFS)
1663 return -ENFILE;
1664 rtnl_lock();
1665 ret = mif6_add(net, mrt, &vif,
1666 sk == rtnl_dereference(mrt->mroute_sk));
1667 rtnl_unlock();
1668 return ret;
1669
1670 case MRT6_DEL_MIF:
1671 if (optlen < sizeof(mifi_t))
1672 return -EINVAL;
1673 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1674 return -EFAULT;
1675 rtnl_lock();
1676 ret = mif6_delete(mrt, mifi, 0, NULL);
1677 rtnl_unlock();
1678 return ret;
1679
1680 /*
1681 * Manipulate the forwarding caches. These live
1682 * in a sort of kernel/user symbiosis.
1683 */
1684 case MRT6_ADD_MFC:
1685 case MRT6_DEL_MFC:
1686 parent = -1;
1687 /* fall through */
1688 case MRT6_ADD_MFC_PROXY:
1689 case MRT6_DEL_MFC_PROXY:
1690 if (optlen < sizeof(mfc))
1691 return -EINVAL;
1692 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1693 return -EFAULT;
1694 if (parent == 0)
1695 parent = mfc.mf6cc_parent;
1696 rtnl_lock();
1697 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1698 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1699 else
1700 ret = ip6mr_mfc_add(net, mrt, &mfc,
1701 sk ==
1702 rtnl_dereference(mrt->mroute_sk),
1703 parent);
1704 rtnl_unlock();
1705 return ret;
1706
1707 /*
1708 * Control PIM assert (to activate pim will activate assert)
1709 */
1710 case MRT6_ASSERT:
1711 {
1712 int v;
1713
1714 if (optlen != sizeof(v))
1715 return -EINVAL;
1716 if (get_user(v, (int __user *)optval))
1717 return -EFAULT;
1718 mrt->mroute_do_assert = v;
1719 return 0;
1720 }
1721
1722#ifdef CONFIG_IPV6_PIMSM_V2
1723 case MRT6_PIM:
1724 {
1725 int v;
1726
1727 if (optlen != sizeof(v))
1728 return -EINVAL;
1729 if (get_user(v, (int __user *)optval))
1730 return -EFAULT;
1731 v = !!v;
1732 rtnl_lock();
1733 ret = 0;
1734 if (v != mrt->mroute_do_pim) {
1735 mrt->mroute_do_pim = v;
1736 mrt->mroute_do_assert = v;
1737 }
1738 rtnl_unlock();
1739 return ret;
1740 }
1741
1742#endif
1743#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1744 case MRT6_TABLE:
1745 {
1746 u32 v;
1747
1748 if (optlen != sizeof(u32))
1749 return -EINVAL;
1750 if (get_user(v, (u32 __user *)optval))
1751 return -EFAULT;
1752 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1753 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1754 return -EINVAL;
1755 if (sk == rcu_access_pointer(mrt->mroute_sk))
1756 return -EBUSY;
1757
1758 rtnl_lock();
1759 ret = 0;
1760 if (!ip6mr_new_table(net, v))
1761 ret = -ENOMEM;
1762 raw6_sk(sk)->ip6mr_table = v;
1763 rtnl_unlock();
1764 return ret;
1765 }
1766#endif
1767 /*
1768 * Spurious command, or MRT6_VERSION which you cannot
1769 * set.
1770 */
1771 default:
1772 return -ENOPROTOOPT;
1773 }
1774}
1775
1776/*
1777 * Getsock opt support for the multicast routing system.
1778 */
1779
1780int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1781 int __user *optlen)
1782{
1783 int olr;
1784 int val;
1785 struct net *net = sock_net(sk);
1786 struct mr_table *mrt;
1787
1788 if (sk->sk_type != SOCK_RAW ||
1789 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1790 return -EOPNOTSUPP;
1791
1792 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1793 if (!mrt)
1794 return -ENOENT;
1795
1796 switch (optname) {
1797 case MRT6_VERSION:
1798 val = 0x0305;
1799 break;
1800#ifdef CONFIG_IPV6_PIMSM_V2
1801 case MRT6_PIM:
1802 val = mrt->mroute_do_pim;
1803 break;
1804#endif
1805 case MRT6_ASSERT:
1806 val = mrt->mroute_do_assert;
1807 break;
1808 default:
1809 return -ENOPROTOOPT;
1810 }
1811
1812 if (get_user(olr, optlen))
1813 return -EFAULT;
1814
1815 olr = min_t(int, olr, sizeof(int));
1816 if (olr < 0)
1817 return -EINVAL;
1818
1819 if (put_user(olr, optlen))
1820 return -EFAULT;
1821 if (copy_to_user(optval, &val, olr))
1822 return -EFAULT;
1823 return 0;
1824}
1825
1826/*
1827 * The IP multicast ioctl support routines.
1828 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
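
/*
 * Illustrative userspace sketch: reading the per-(S,G) counters that the
 * SIOCGETSGCNT_IN6 branch above fills in.  "sock" and the addresses are
 * placeholders.
 *
 *	struct sioc_sg_req6 sr;
 *
 *	memset(&sr, 0, sizeof(sr));
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::1234", &sr.grp.sin6_addr);
 *	if (ioctl(sock, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts %lu bytes %lu wrong_if %lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */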

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not an mrouter) cannot join on more than one interface - it would
	 * end up receiving multiple copies of each packet.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to modify the header (hop limit), so make sure the
	 * skb is writable and has enough headroom for the device.
	 * XXX: extension headers?
	 */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		       so we cannot check that the packet arrived on an oif.
		       That is bad, but otherwise we would have to move a
		       pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2,
						       c, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, c, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}

static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len = 128;
	rtm->rtm_src_len = 128;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}

static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}

static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock);
}