/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 * Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 * Michael Chastain        : Incorrect size of copying.
 * Alan Cox                : Added the cache manager code.
 * Alan Cox                : Fixed the clone/copy bug and device race.
 * Mike McLagan            : Routing by source.
 * Malcolm Beattie         : Buffer handling fixes.
 * Alexey Kuznetsov        : Double buffer free and other fixes.
 * SVR Anand               : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov        : Status, optimisations and more.
 * Brad Parker             : Better behaviour on mrouted upcall overflow.
 * Carlos Picoto           : PIMv1 support.
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only the PIM
 *                           header. Relax this requirement to work with
 *                           older peers.
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>

struct ipmr_rule {
        struct fib_rule common;
};

struct ipmr_result {
        struct mr_table *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

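/* Find the multicast routing table matching a given table id */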
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}

/* Resolve the mr_table to use for a flow via the IPMR fib rules */
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        int err;
        struct ipmr_result res;
        struct fib_lookup_arg arg = {
                .result = &res,
                .flags = FIB_LOOKUP_NOREF,
        };

        err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                               flowi4_to_flowi(flp4), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}

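/* fib rules callback: map a matched rule onto its mr_table, or translate
 * the unreachable/prohibit/blackhole actions into the matching errors.
 */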
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        mrt = ipmr_get_table(rule->fr_net, rule->table);
        if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
        FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos = 0;
        return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .family         = RTNL_FAMILY_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};

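/* Per-namespace setup: register the IPMR rule ops, create the default
 * table and install a lowest-priority rule pointing at it.
 */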
static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt)) {
                err = PTR_ERR(mrt);
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        ipmr_free_table(mrt);
err1:
        fib_rules_unregister(ops);
        return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
        rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        struct mr_table *mrt;

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt))
                return PTR_ERR(mrt);
        net->ipv4.mrt = mrt;
        return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        rtnl_lock();
        ipmr_free_table(net->ipv4.mrt);
        net->ipv4.mrt = NULL;
        rtnl_unlock();
}
#endif

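/* Return the table with the given id, creating and registering it first
 * if it does not exist yet.
 */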
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;
        unsigned int i;

        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
        if (id != RT_TABLE_DEFAULT && id >= 1000000000)
                return ERR_PTR(-EINVAL);

        mrt = ipmr_get_table(net, id);
        if (mrt)
                return mrt;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (!mrt)
                return ERR_PTR(-ENOMEM);
        write_pnet(&mrt->net, net);
        mrt->id = id;

        /* Forwarding cache */
        for (i = 0; i < MFC_LINES; i++)
                INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                    (unsigned long)mrt);

        mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
        return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
        del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt, true);
        kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev_close(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
        struct in_device *in_dev;

        ASSERT_RTNL();

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev)
                return false;
        ipv4_devconf_setall(in_dev);
        neigh_parms_data_state_setall(in_dev->arp_parms);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        return true;
}

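/* Create a DVMRP tunnel vif: ask the tunl0 device to add a dvmrpN IPIP
 * tunnel, then mark it multicast capable and bring it up.
 */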
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else {
                        err = -EOPNOTSUPP;
                }
                dev = NULL;

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;
                        if (!ipmr_init_vif_indev(dev))
                                goto failure;
                        if (dev_open(dev))
                                goto failure;
                        dev_hold(dev);
                }
        }
        return dev;

failure:
        unregister_netdevice(dev);
        return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi4 fl4 = {
                .flowi4_oif     = dev->ifindex,
                .flowi4_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi4_mark    = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return err;
        }

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
        .ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}

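/* Allocate and register the pimreg device that carries PIM REGISTER
 * packets up to the daemon.
 */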
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }

        if (!ipmr_init_vif_indev(dev))
                goto failure;
        if (dev_open(dev))
                goto failure;

        dev_hold(dev);

        return dev;

failure:
        unregister_netdevice(dev);
        return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /* Check that:
         * a. packet is really sent to a multicast group
         * b. packet is not a NULL-REGISTER
         * c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)
                return 1;

        read_lock(&mrt_lock);
        if (mrt->mroute_reg_vif_num >= 0)
                reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        read_unlock(&mrt_lock);

        if (!reg_dev)
                return 1;

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = CHECKSUM_NONE;

        skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

        netif_rx(skb);

        return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        return NULL;
}
#endif

/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1 if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;

        if (vifi + 1 == mrt->maxvif) {
                int tmp;

                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp + 1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                inet_netconf_notify_devconf(dev_net(dev),
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
        struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

        kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
        call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = nlmsg_data(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
        }

        ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
        struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;

        now = jiffies;
        expires = 10 * HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;

                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                mroute_netlink_event(mrt, c, RTM_DELROUTE);
                ipmr_destroy_unres(mrt, c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}

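/* Install a new virtual interface in the vif table. The backing device is
 * looked up or created according to vifc_flags (register, tunnel, ifindex
 * or local address).
 */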
static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy ? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;

        switch (vifc->vifc_flags) {
        case VIFF_REGISTER:
                if (!ipmr_pimsm_enabled())
                        return -EINVAL;
                /* Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        dev_put(dev);
                        return err;
                }
                break;
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        dev_put(dev);
                        return err;
                }
                break;
        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else {
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
                }
                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        dev_put(dev);
                        return err;
                }
                break;
        default:
                return -EINVAL;
        }

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
                                    &in_dev->cnf);
        ip_rt_multicast_event(in_dev);

        /* Fill in the VIF structures */
        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
                v->link = dev_get_iflink(dev);

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
        if (v->flags & VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
        if (vifi + 1 > mrt->maxvif)
                mrt->maxvif = vifi + 1;
        write_unlock_bh(&mrt_lock);
        return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                        return c;
        }
        return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
{
        int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
                    c->mfc_un.res.ttls[vifi] < 255)
                        return c;

        return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
        int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
        struct mfc_cache *c, *proxy;

        if (mcastgrp == htonl(INADDR_ANY))
                goto skip;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == mcastgrp) {
                        if (c->mfc_un.res.ttls[vifi] < 255)
                                return c;

                        /* It's ok if the vifi is part of the static tree */
                        proxy = ipmr_cache_find_any_parent(mrt,
                                                           c->mfc_parent);
                        if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
                                return c;
                }

skip:
        return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

        if (c)
                c->mfc_un.res.minvif = MAXVIFS;
        return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

        if (c) {
                skb_queue_head_init(&c->mfc_un.unres.unresolved);
                c->mfc_un.unres.expires = jiffies + 10 * HZ;
        }
        return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /* Play the pending entries through our router */
        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
                                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                                 (u8 *)nlh;
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = nlmsg_data(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb, c, 0);
                }
        }
}

/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        const int ihl = ip_hdrlen(pkt);
        struct sock *mroute_sk;
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        struct sk_buff *skb;
        int ret;

        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
                skb = alloc_skb(128, GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                 * Duplicate old header, fix ihl, length etc.
                 * And all this only to mangle msg->im_msgtype and
                 * to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else {
                /* Copy the IP header */
                skb_set_network_header(skb, skb->len);
                skb_put(skb, ihl);
                skb_copy_to_linear_data(skb, pkt->data, ihl);
                /* Flag to the kernel this is a route add */
                ip_hdr(skb)->protocol = 0;
                msg = (struct igmpmsg *)skb_network_header(skb);
                msg->im_vif = vifi;
                skb_dst_set(skb, dst_clone(skb_dst(pkt)));
                /* Add our header */
                igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
                igmp->type = assert;
                msg->im_msgtype = assert;
                igmp->code = 0;
                ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
                skb->transport_header = skb->network_header;
        }

        rcu_read_lock();
        mroute_sk = rcu_dereference(mrt->mroute_sk);
        if (!mroute_sk) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -EINVAL;
        }

        /* Deliver to mrouted */
        ret = sock_queue_rcv_skb(mroute_sk, skb);
        rcu_read_unlock();
        if (ret < 0) {
                net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
                kfree_skb(skb);
        }

        return ret;
}

/* Queue a packet for resolution. It takes a locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                                 struct sk_buff *skb)
{
        bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                /* Create a new entry if allowable */
                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /* Fill in the new cache entry */
                c->mfc_parent = -1;
                c->mfc_origin = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /* Reflect first query at mrouted. */
                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                         * out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->list, &mrt->mfc_unres_queue);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);

                if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
                        mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }

        /* See if we can append the packet */
        if (c->mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
        int line;
        struct mfc_cache *c, *next;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                        return 0;
                }
        }
        return -ENOENT;
}

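/* Add or replace an MFC entry. If the new entry resolves one sitting on
 * the unresolved queue, replay the queued packets and tidy up.
 */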
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
        bool found = false;
        int line;
        struct mfc_cache *uc, *c;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);
                return 0;
        }

        if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
            !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (!c)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

        /* Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
        int i;
        LIST_HEAD(list);
        struct mfc_cache *c, *next;

        /* Shut down all active vif entries */
        for (i = 0; i < mrt->maxvif; i++) {
                if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
                        continue;
                vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);

        /* Wipe the cache */
        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
                        if (!all && (c->mfc_flags & MFC_STATIC))
                                continue;
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                }
        }

        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
                        mroute_clean_tables(mrt, false);
                }
        }
        rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                         unsigned int optlen)
{
        struct net *net = sock_net(sk);
        int val, ret = 0, parent = 0;
        struct mr_table *mrt;
        struct vifctl vif;
        struct mfcctl mfc;
        u32 uval;

        /* There's one exception to the lock - MRT_DONE which needs to unlock */
        rtnl_lock();
        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP) {
                ret = -EOPNOTSUPP;
                goto out_unlock;
        }

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt) {
                ret = -ENOENT;
                goto out_unlock;
        }
        if (optname != MRT_INIT) {
                if (sk != rcu_access_pointer(mrt->mroute_sk) &&
                    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        ret = -EACCES;
                        goto out_unlock;
                }
        }

        switch (optname) {
        case MRT_INIT:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                if (rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EADDRINUSE;
                        break;
                }

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        rcu_assign_pointer(mrt->mroute_sk, sk);
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                }
                break;
        case MRT_DONE:
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
                        /* We need to unlock here because mrtsock_destruct takes
                         * care of rtnl itself and we can't change that due to
                         * the IP_ROUTER_ALERT setsockopt which runs without it.
                         */
                        rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
                        goto out;
                }
                break;
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&vif, optval, sizeof(vif))) {
                        ret = -EFAULT;
                        break;
                }
                if (vif.vifc_vifi >= MAXVIFS) {
                        ret = -ENFILE;
                        break;
                }
                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(net, mrt, &vif,
                                      sk == rtnl_dereference(mrt->mroute_sk));
                } else {
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                break;
        /* Manipulate the forwarding caches. These live
         * in a sort of kernel/user symbiosis.
         */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                parent = -1;
                /* fall through */
        case MRT_ADD_MFC_PROXY:
        case MRT_DEL_MFC_PROXY:
                if (optlen != sizeof(mfc)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&mfc, optval, sizeof(mfc))) {
                        ret = -EFAULT;
                        break;
                }
                if (parent == 0)
                        parent = mfc.mfcc_parent;
                if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
                        ret = ipmr_mfc_delete(mrt, &mfc, parent);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc,
                                           sk == rtnl_dereference(mrt->mroute_sk),
                                           parent);
                break;
        /* Control PIM assert. */
        case MRT_ASSERT:
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }
                mrt->mroute_do_assert = val;
                break;
        case MRT_PIM:
                if (!ipmr_pimsm_enabled()) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }

                val = !!val;
                if (val != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = val;
                        mrt->mroute_do_assert = val;
                }
                break;
        case MRT_TABLE:
                if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(uval)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(uval, (u32 __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }

                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EBUSY;
                } else {
                        mrt = ipmr_new_table(net, uval);
                        if (IS_ERR(mrt))
                                ret = PTR_ERR(mrt);
                        else
                                raw_sk(sk)->ipmr_table = uval;
                }
                break;
        /* Spurious command, or MRT_VERSION which you cannot set. */
        default:
                ret = -ENOPROTOOPT;
        }
out_unlock:
        rtnl_unlock();
out:
        return ret;
}

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP)
                return -EOPNOTSUPP;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (optname) {
        case MRT_VERSION:
                val = 0x0305;
                break;
        case MRT_PIM:
                if (!ipmr_pimsm_enabled())
                        return -ENOPROTOOPT;
                val = mrt->mroute_do_pim;
                break;
        case MRT_ASSERT:
                val = mrt->mroute_do_assert;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (get_user(olr, optlen))
                return -EFAULT;
        /* Reject a negative length before the unsigned min_t() clamp,
         * otherwise the check can never trigger.
         */
        if (olr < 0)
                return -EINVAL;
        olr = min_t(unsigned int, olr, sizeof(int));
        if (put_user(olr, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
        struct in_addr src;
        struct in_addr grp;
        compat_ulong_t pktcnt;
        compat_ulong_t bytecnt;
        compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
        vifi_t vifi;            /* Which iface */
        compat_ulong_t icount;
        compat_ulong_t ocount;
        compat_ulong_t ibytes;
        compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
        struct compat_sioc_sg_req sr;
        struct compat_sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
#endif

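/* netdevice notifier: on NETDEV_UNREGISTER, delete every vif that was
 * bound to the vanishing device in all tables.
 */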
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;

        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;

        ipmr_for_each_table(mrt, net) {
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
                                vif_delete(mrt, ct, 1, NULL);
                }
        }
        return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
                     __be32 saddr, __be32 daddr)
{
        struct iphdr *iph;
        const struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        iph->version = 4;
        iph->tos = old_iph->tos;
        iph->ttl = old_iph->ttl;
        iph->frag_off = 0;
        iph->daddr = daddr;
        iph->saddr = saddr;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl = 5;
        iph->tot_len = htons(skb->len);
        ip_select_ident(net, skb, NULL);
        ip_send_check(iph);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        nf_reset(skb);
}

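/* Final forwarding step: update output stats, handle IP options and hand
 * the packet to dst_output().
 */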
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
                                      struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);

        IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(net, sk, skb);
}

/* Processing handlers for ipmr_forward */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
        struct flowi4 fl4;
        int encap = 0;

        if (!vif->dev)
                goto out_free;

        if (vif->flags & VIFF_REGISTER) {
                vif->pkt_out++;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }

        if (vif->flags & VIFF_TUNNEL) {
                rt = ip_route_output_ports(net, &fl4, NULL,
                                           vif->remote, vif->local,
                                           0, 0,
                                           IPPROTO_IPIP,
                                           RT_TOS(iph->tos), vif->link);
                if (IS_ERR(rt))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
                rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
                                           0, 0,
                                           IPPROTO_IPIP,
                                           RT_TOS(iph->tos), vif->link);
                if (IS_ERR(rt))
                        goto out_free;
        }

        dev = rt->dst.dev;

        if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                 * allow us to send ICMP here, so the packets will
                 * just disappear into a blackhole.
                 */
                IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                ip_rt_put(rt);
                goto out_free;
        }

        encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

        if (skb_cow(skb, encap)) {
                ip_rt_put(rt);
                goto out_free;
        }

        vif->pkt_out++;
        vif->bytes_out += skb->len;

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        ip_decrease_ttl(ip_hdr(skb));

        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR
         */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(net, skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;
        }

        IPCB(skb)->flags |= IPSKB_FORWARDED;

        /* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
         * locally not only before forwarding, but also after forwarding on
         * all output interfaces. Clearly, if an mrouter runs a multicasting
         * program, it should receive packets regardless of which interface
         * the program joined on. Otherwise the program would have to join
         * on all interfaces. On the other hand, a multihomed host (or
         * router, but not an mrouter) cannot join on more than one
         * interface - that would result in receiving duplicate packets.
         */
        NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
                net, NULL, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;

out_free:
        kfree_skb(skb);
}

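/* Map a net_device back to its vif index; returns -1 if the device is
 * not a configured vif.
 */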
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
        int ct;

        for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
                if (mrt->vif_table[ct].dev == dev)
                        break;
        }
        return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local)
{
        int psend = -1;
        int vif, ct;
        int true_vifi = ipmr_find_vif(mrt, skb->dev);

        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
        cache->mfc_un.res.bytes += skb->len;

        if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
                struct mfc_cache *cache_proxy;

                /* For an (*,G) entry, we only check that the incoming
                 * interface is part of the static tree.
                 */
                cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
                if (cache_proxy &&
                    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
                        goto forward;
        }

        /* Wrong interface: drop packet and (maybe) send PIM assert. */
        if (mrt->vif_table[vif].dev != skb->dev) {
                if (rt_is_output_route(skb_rtable(skb))) {
                        /* It is our own packet, looped back.
                         * Very complicated situation...
                         *
                         * The best workaround until routing daemons are
                         * fixed is not to redistribute a packet if it was
                         * sent through the wrong interface. It means that
                         * multicast applications WILL NOT work for (S,G)
                         * entries that have a default multicast route
                         * pointing to the wrong oif. In any case, it is
                         * not a good idea to use multicasting applications
                         * on a router.
                         */
                        goto dont_forward;
                }

                cache->mfc_un.res.wrong_if++;

                if (true_vifi >= 0 && mrt->mroute_do_assert &&
                    /* pimsm uses asserts, when switching from RPT to SPT,
                     * so that we cannot check that packet arrived on an oif.
                     * It is bad, but otherwise we would need to move pretty
                     * large chunk of pimd to kernel. Ough... --ANK
                     */
                    (mrt->mroute_do_pim ||
                     cache->mfc_un.res.ttls[true_vifi] < 255) &&
                    time_after(jiffies,
                               cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
                        cache->mfc_un.res.last_assert = jiffies;
                        ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
                }
                goto dont_forward;
        }

forward:
        mrt->vif_table[vif].pkt_in++;
        mrt->vif_table[vif].bytes_in += skb->len;

        /* Forward the frame */
        if (cache->mfc_origin == htonl(INADDR_ANY) &&
            cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
                if (true_vifi >= 0 &&
                    true_vifi != cache->mfc_parent &&
                    ip_hdr(skb)->ttl >
                                cache->mfc_un.res.ttls[cache->mfc_parent]) {
                        /* It's an (*,*) entry and the packet is not coming from
                         * the upstream: forward the packet to the upstream
                         * only.
                         */
                        psend = cache->mfc_parent;
                        goto last_forward;
                }
                goto dont_forward;
        }
        for (ct = cache->mfc_un.res.maxvif - 1;
             ct >= cache->mfc_un.res.minvif; ct--) {
                /* For (*,G) entry, don't forward to the incoming interface */
                if ((cache->mfc_origin != htonl(INADDR_ANY) ||
                     ct != true_vifi) &&
                    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
                        if (psend != -1) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                                if (skb2)
                                        ipmr_queue_xmit(net, mrt, skb2, cache,
                                                        psend);
                        }
                        psend = ct;
                }
        }
last_forward:
        if (psend != -1) {
                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                        if (skb2)
                                ipmr_queue_xmit(net, mrt, skb2, cache, psend);
                } else {
                        ipmr_queue_xmit(net, mrt, skb, cache, psend);
                        return;
                }
        }

dont_forward:
        if (!local)
                kfree_skb(skb);
}

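/* Build a flow key from the skb's addresses and device and resolve which
 * mr_table should handle it.
 */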
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph = ip_hdr(skb);
        struct flowi4 fl4 = {
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowi4_tos = RT_TOS(iph->tos),
                .flowi4_oif = (rt_is_output_route(rt) ?
                               skb->dev->ifindex : 0),
                .flowi4_iif = (rt_is_output_route(rt) ?
                               LOOPBACK_IFINDEX :
                               skb->dev->ifindex),
                .flowi4_mark = skb->mark,
        };
        struct mr_table *mrt;
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err)
                return ERR_PTR(err);
        return mrt;
}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
        struct mfc_cache *cache;
        struct net *net = dev_net(skb->dev);
        int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
        struct mr_table *mrt;

        /* Packet is looped back after forward, it should not be
         * forwarded second time, but still can be delivered locally.
         */
        if (IPCB(skb)->flags & IPSKB_FORWARDED)
                goto dont_forward;

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt)) {
                kfree_skb(skb);
                return PTR_ERR(mrt);
        }
        if (!local) {
                if (IPCB(skb)->opt.router_alert) {
                        if (ip_call_ra_chain(skb))
                                return 0;
                } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
                        /* IGMPv1 (and broken IGMPv2 implementations such as
                         * Cisco IOS <= 11.2(8)) do not put the router alert
                         * option into IGMP packets destined to routable
                         * groups. It is very bad, because it means
                         * that we can forward NO IGMP messages.
                         */
                        struct sock *mroute_sk;

                        mroute_sk = rcu_dereference(mrt->mroute_sk);
                        if (mroute_sk) {
                                nf_reset(skb);
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
                }
        }

        /* already under rcu_read_lock() */
        cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
        if (!cache) {
                int vif = ipmr_find_vif(mrt, skb->dev);

                if (vif >= 0)
                        cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
                                                    vif);
        }

        /* No usable cache entry */
        if (!cache) {
                int vif;

                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                        ip_local_deliver(skb);
                        if (!skb2)
                                return -ENOBUFS;
                        skb = skb2;
                }

                read_lock(&mrt_lock);
                vif = ipmr_find_vif(mrt, skb->dev);
                if (vif >= 0) {
                        int err2 = ipmr_cache_unresolved(mrt, vif, skb);

                        read_unlock(&mrt_lock);

                        return err2;
                }
                read_unlock(&mrt_lock);
                kfree_skb(skb);
                return -ENODEV;
        }

        read_lock(&mrt_lock);
        ip_mr_forward(net, mrt, skb, cache, local);
        read_unlock(&mrt_lock);

        if (local)
                return ip_local_deliver(skb);

        return 0;

dont_forward:
        if (local)
                return ip_local_deliver(skb);
        kfree_skb(skb);
        return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
        struct igmphdr *pim;
        struct net *net = dev_net(skb->dev);
        struct mr_table *mrt;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;

        pim = igmp_hdr(skb);

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt))
                goto drop;
        if (!mrt->mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;

        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
                kfree_skb(skb);
        }
        return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
        struct pimreghdr *pim;
        struct net *net = dev_net(skb->dev);
        struct mr_table *mrt;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;

        pim = (struct pimreghdr *)skb_transport_header(skb);
        if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
            (pim->flags & PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt))
                goto drop;
        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
                kfree_skb(skb);
        }
        return 0;
}
#endif

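/* Dump the forwarding state of one MFC entry (input interface, output
 * interface list with TTL thresholds, packet statistics) into an
 * rtnetlink message.
 */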
2069static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2070 struct mfc_cache *c, struct rtmsg *rtm)
2071{
2072 int ct;
2073 struct rtnexthop *nhp;
2074 struct nlattr *mp_attr;
2075 struct rta_mfc_stats mfcs;
2076
2077 /* If cache is unresolved, don't try to parse IIF and OIF */
2078 if (c->mfc_parent >= MAXVIFS)
2079 return -ENOENT;
2080
2081 if (VIF_EXISTS(mrt, c->mfc_parent) &&
2082 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2083 return -EMSGSIZE;
2084
2085 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2086 return -EMSGSIZE;
2087
2088 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2089 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2090 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2091 nla_nest_cancel(skb, mp_attr);
2092 return -EMSGSIZE;
2093 }
2094
2095 nhp->rtnh_flags = 0;
2096 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2097 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2098 nhp->rtnh_len = sizeof(*nhp);
2099 }
2100 }
2101
2102 nla_nest_end(skb, mp_attr);
2103
2104 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2105 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2106 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2107 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2108 return -EMSGSIZE;
2109
2110 rtm->rtm_type = RTN_MULTICAST;
2111 return 1;
2112}
2113
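/* Fill an rtmsg for one multicast route; typically called from the IPv4
 * routing code when answering an RTM_GETROUTE request for a multicast
 * destination.
 */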
2114int ipmr_get_route(struct net *net, struct sk_buff *skb,
2115 __be32 saddr, __be32 daddr,
2116 struct rtmsg *rtm, int nowait)
2117{
2118 struct mfc_cache *cache;
2119 struct mr_table *mrt;
2120 int err;
2121
2122 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2123 if (!mrt)
2124 return -ENOENT;
2125
2126 rcu_read_lock();
2127 cache = ipmr_cache_find(mrt, saddr, daddr);
2128 if (!cache && skb->dev) {
2129 int vif = ipmr_find_vif(mrt, skb->dev);
2130
2131 if (vif >= 0)
2132 cache = ipmr_cache_find_any(mrt, daddr, vif);
2133 }
2134 if (!cache) {
2135 struct sk_buff *skb2;
2136 struct iphdr *iph;
2137 struct net_device *dev;
2138 int vif = -1;
2139
2140 if (nowait) {
2141 rcu_read_unlock();
2142 return -EAGAIN;
2143 }
2144
2145 dev = skb->dev;
2146 read_lock(&mrt_lock);
2147 if (dev)
2148 vif = ipmr_find_vif(mrt, dev);
2149 if (vif < 0) {
2150 read_unlock(&mrt_lock);
2151 rcu_read_unlock();
2152 return -ENODEV;
2153 }
2154 skb2 = skb_clone(skb, GFP_ATOMIC);
2155 if (!skb2) {
2156 read_unlock(&mrt_lock);
2157 rcu_read_unlock();
2158 return -ENOMEM;
2159 }
2160
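		/* Build a skeletal IP header for the upcall; version 0
		 * marks skb2 as a locally generated request rather than
		 * real multicast traffic.
		 */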
2161 skb_push(skb2, sizeof(struct iphdr));
2162 skb_reset_network_header(skb2);
2163 iph = ip_hdr(skb2);
2164 iph->ihl = sizeof(struct iphdr) >> 2;
2165 iph->saddr = saddr;
2166 iph->daddr = daddr;
2167 iph->version = 0;
2168 err = ipmr_cache_unresolved(mrt, vif, skb2);
2169 read_unlock(&mrt_lock);
2170 rcu_read_unlock();
2171 return err;
2172 }
2173
2174 read_lock(&mrt_lock);
2175 err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2176 read_unlock(&mrt_lock);
2177 rcu_read_unlock();
2178 return err;
2179}
2180
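/* Build a complete routing message for one cache entry; used both for
 * netlink notifications and for dump replies.
 */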
2181static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2182 u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2183 int flags)
2184{
2185 struct nlmsghdr *nlh;
2186 struct rtmsg *rtm;
2187 int err;
2188
2189 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2190 if (!nlh)
2191 return -EMSGSIZE;
2192
2193 rtm = nlmsg_data(nlh);
2194 rtm->rtm_family = RTNL_FAMILY_IPMR;
2195 rtm->rtm_dst_len = 32;
2196 rtm->rtm_src_len = 32;
2197 rtm->rtm_tos = 0;
2198 rtm->rtm_table = mrt->id;
2199 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2200 goto nla_put_failure;
2201 rtm->rtm_type = RTN_MULTICAST;
2202 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2203 if (c->mfc_flags & MFC_STATIC)
2204 rtm->rtm_protocol = RTPROT_STATIC;
2205 else
2206 rtm->rtm_protocol = RTPROT_MROUTED;
2207 rtm->rtm_flags = 0;
2208
2209 if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2210 nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2211 goto nla_put_failure;
2212 err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2213 /* do not break the dump if cache is unresolved */
2214 if (err < 0 && err != -ENOENT)
2215 goto nla_put_failure;
2216
2217 nlmsg_end(skb, nlh);
2218 return 0;
2219
2220nla_put_failure:
2221 nlmsg_cancel(skb, nlh);
2222 return -EMSGSIZE;
2223}
2224
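/* Worst-case netlink message size for one cache entry; an unresolved
 * entry carries only the table, source and destination attributes.
 */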
2225static size_t mroute_msgsize(bool unresolved, int maxvif)
2226{
2227 size_t len =
2228 NLMSG_ALIGN(sizeof(struct rtmsg))
2229 + nla_total_size(4) /* RTA_TABLE */
2230 + nla_total_size(4) /* RTA_SRC */
2231 + nla_total_size(4) /* RTA_DST */
2232 ;
2233
2234 if (!unresolved)
2235 len = len
2236 + nla_total_size(4) /* RTA_IIF */
2237 + nla_total_size(0) /* RTA_MULTIPATH */
2238 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2239 /* RTA_MFC_STATS */
2240 + nla_total_size(sizeof(struct rta_mfc_stats))
2241 ;
2242
2243 return len;
2244}
2245
2246static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2247 int cmd)
2248{
2249 struct net *net = read_pnet(&mrt->net);
2250 struct sk_buff *skb;
2251 int err = -ENOBUFS;
2252
2253 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2254 GFP_ATOMIC);
2255 if (!skb)
2256 goto errout;
2257
2258 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2259 if (err < 0)
2260 goto errout;
2261
2262 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2263 return;
2264
2265errout:
2266 kfree_skb(skb);
2267 if (err < 0)
2268 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2269}
2270
2271static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2272{
2273 struct net *net = sock_net(skb->sk);
2274 struct mr_table *mrt;
2275 struct mfc_cache *mfc;
2276 unsigned int t = 0, s_t;
2277 unsigned int h = 0, s_h;
2278 unsigned int e = 0, s_e;
2279
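	/* cb->args[] holds the resume point of an interrupted dump:
	 * table index, hash line and entry index.
	 */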
2280 s_t = cb->args[0];
2281 s_h = cb->args[1];
2282 s_e = cb->args[2];
2283
2284 rcu_read_lock();
2285 ipmr_for_each_table(mrt, net) {
2286 if (t < s_t)
2287 goto next_table;
2288 if (t > s_t)
2289 s_h = 0;
2290 for (h = s_h; h < MFC_LINES; h++) {
2291 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
2292 if (e < s_e)
2293 goto next_entry;
2294 if (ipmr_fill_mroute(mrt, skb,
2295 NETLINK_CB(cb->skb).portid,
2296 cb->nlh->nlmsg_seq,
2297 mfc, RTM_NEWROUTE,
2298 NLM_F_MULTI) < 0)
2299 goto done;
2300next_entry:
2301 e++;
2302 }
2303 e = s_e = 0;
2304 }
2305 spin_lock_bh(&mfc_unres_lock);
2306 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2307 if (e < s_e)
2308 goto next_entry2;
2309 if (ipmr_fill_mroute(mrt, skb,
2310 NETLINK_CB(cb->skb).portid,
2311 cb->nlh->nlmsg_seq,
2312 mfc, RTM_NEWROUTE,
2313 NLM_F_MULTI) < 0) {
2314 spin_unlock_bh(&mfc_unres_lock);
2315 goto done;
2316 }
2317next_entry2:
2318 e++;
2319 }
2320 spin_unlock_bh(&mfc_unres_lock);
2321 e = s_e = 0;
2322 s_h = 0;
2323next_table:
2324 t++;
2325 }
2326done:
2327 rcu_read_unlock();
2328
2329 cb->args[2] = e;
2330 cb->args[1] = h;
2331 cb->args[0] = t;
2332
2333 return skb->len;
2334}
2335
2336static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2337 [RTA_SRC] = { .type = NLA_U32 },
2338 [RTA_DST] = { .type = NLA_U32 },
2339 [RTA_IIF] = { .type = NLA_U32 },
2340 [RTA_TABLE] = { .type = NLA_U32 },
2341 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2342};
2343
2344static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2345{
2346 switch (rtm_protocol) {
2347 case RTPROT_STATIC:
2348 case RTPROT_MROUTED:
2349 return true;
2350 }
2351 return false;
2352}
2353
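/* Copy per-vif TTL thresholds from an RTA_MULTIPATH attribute into
 * mfcc->mfcc_ttls. Returns the number of vifs parsed, or -EINVAL if
 * unparsed data remains in the attribute.
 */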
2354static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2355{
2356 struct rtnexthop *rtnh = nla_data(nla);
2357 int remaining = nla_len(nla), vifi = 0;
2358
2359 while (rtnh_ok(rtnh, remaining)) {
2360 mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2361 if (++vifi == MAXVIFS)
2362 break;
2363 rtnh = rtnh_next(rtnh, &remaining);
2364 }
2365
2366 return remaining > 0 ? -EINVAL : vifi;
2367}
2368
2369/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2370static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2371 struct mfcctl *mfcc, int *mrtsock,
2372 struct mr_table **mrtret)
2373{
2374 struct net_device *dev = NULL;
2375 u32 tblid = RT_TABLE_DEFAULT;
2376 struct mr_table *mrt;
2377 struct nlattr *attr;
2378 struct rtmsg *rtm;
2379 int ret, rem;
2380
2381 ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
2382 if (ret < 0)
2383 goto out;
2384 rtm = nlmsg_data(nlh);
2385
2386 ret = -EINVAL;
2387 if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2388 rtm->rtm_type != RTN_MULTICAST ||
2389 rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2390 !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2391 goto out;
2392
2393 memset(mfcc, 0, sizeof(*mfcc));
2394 mfcc->mfcc_parent = -1;
2395 ret = 0;
2396 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2397 switch (nla_type(attr)) {
2398 case RTA_SRC:
2399 mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2400 break;
2401 case RTA_DST:
2402 mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2403 break;
2404 case RTA_IIF:
2405 dev = __dev_get_by_index(net, nla_get_u32(attr));
2406 if (!dev) {
2407 ret = -ENODEV;
2408 goto out;
2409 }
2410 break;
2411 case RTA_MULTIPATH:
2412 if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2413 ret = -EINVAL;
2414 goto out;
2415 }
2416 break;
2417 case RTA_PREFSRC:
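			/* marks a proxy entry; see the return-value
			 * comment above rtm_to_ipmr_mfcc()
			 */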
2418 ret = 1;
2419 break;
2420 case RTA_TABLE:
2421 tblid = nla_get_u32(attr);
2422 break;
2423 }
2424 }
2425 mrt = ipmr_get_table(net, tblid);
2426 if (!mrt) {
2427 ret = -ENOENT;
2428 goto out;
2429 }
2430 *mrtret = mrt;
2431 *mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2432 if (dev)
2433 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2434
2435out:
2436 return ret;
2437}
2438
2439/* takes care of both newroute and delroute */
2440static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
2441{
2442 struct net *net = sock_net(skb->sk);
2443 int ret, mrtsock, parent;
2444 struct mr_table *tbl;
2445 struct mfcctl mfcc;
2446
2447 mrtsock = 0;
2448 tbl = NULL;
2449 ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
2450 if (ret < 0)
2451 return ret;
2452
2453 parent = ret ? mfcc.mfcc_parent : -1;
2454 if (nlh->nlmsg_type == RTM_NEWROUTE)
2455 return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2456 else
2457 return ipmr_mfc_delete(tbl, &mfcc, parent);
2458}
2459
2460#ifdef CONFIG_PROC_FS
2461/* The /proc interfaces to multicast routing:
2462 * /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
2463 */
2464struct ipmr_vif_iter {
2465 struct seq_net_private p;
2466 struct mr_table *mrt;
2467 int ct;
2468};
2469
2470static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2471 struct ipmr_vif_iter *iter,
2472 loff_t pos)
2473{
2474 struct mr_table *mrt = iter->mrt;
2475
2476 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2477 if (!VIF_EXISTS(mrt, iter->ct))
2478 continue;
2479 if (pos-- == 0)
2480 return &mrt->vif_table[iter->ct];
2481 }
2482 return NULL;
2483}
2484
2485static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2486 __acquires(mrt_lock)
2487{
2488 struct ipmr_vif_iter *iter = seq->private;
2489 struct net *net = seq_file_net(seq);
2490 struct mr_table *mrt;
2491
2492 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2493 if (!mrt)
2494 return ERR_PTR(-ENOENT);
2495
2496 iter->mrt = mrt;
2497
2498 read_lock(&mrt_lock);
2499 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2500 : SEQ_START_TOKEN;
2501}
2502
2503static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2504{
2505 struct ipmr_vif_iter *iter = seq->private;
2506 struct net *net = seq_file_net(seq);
2507 struct mr_table *mrt = iter->mrt;
2508
2509 ++*pos;
2510 if (v == SEQ_START_TOKEN)
2511 return ipmr_vif_seq_idx(net, iter, 0);
2512
2513 while (++iter->ct < mrt->maxvif) {
2514 if (!VIF_EXISTS(mrt, iter->ct))
2515 continue;
2516 return &mrt->vif_table[iter->ct];
2517 }
2518 return NULL;
2519}
2520
2521static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2522 __releases(mrt_lock)
2523{
2524 read_unlock(&mrt_lock);
2525}
2526
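/* One line per vif, matching the header below: index, device name,
 * byte/packet counters in each direction, flags, and the local/remote
 * addresses printed in hex.
 */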
2527static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2528{
2529 struct ipmr_vif_iter *iter = seq->private;
2530 struct mr_table *mrt = iter->mrt;
2531
2532 if (v == SEQ_START_TOKEN) {
2533 seq_puts(seq,
2534 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
2535 } else {
2536 const struct vif_device *vif = v;
2537 const char *name = vif->dev ? vif->dev->name : "none";
2538
2539 seq_printf(seq,
2540			   "%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
2541 vif - mrt->vif_table,
2542 name, vif->bytes_in, vif->pkt_in,
2543 vif->bytes_out, vif->pkt_out,
2544 vif->flags, vif->local, vif->remote);
2545 }
2546 return 0;
2547}
2548
2549static const struct seq_operations ipmr_vif_seq_ops = {
2550 .start = ipmr_vif_seq_start,
2551 .next = ipmr_vif_seq_next,
2552 .stop = ipmr_vif_seq_stop,
2553 .show = ipmr_vif_seq_show,
2554};
2555
2556static int ipmr_vif_open(struct inode *inode, struct file *file)
2557{
2558 return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2559 sizeof(struct ipmr_vif_iter));
2560}
2561
2562static const struct file_operations ipmr_vif_fops = {
2563 .owner = THIS_MODULE,
2564 .open = ipmr_vif_open,
2565 .read = seq_read,
2566 .llseek = seq_lseek,
2567 .release = seq_release_net,
2568};
2569
2570struct ipmr_mfc_iter {
2571 struct seq_net_private p;
2572 struct mr_table *mrt;
2573 struct list_head *cache;
2574 int ct;
2575};
2576
2577
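/* The MFC iterator first walks the resolved hash buckets under RCU,
 * then the unresolved queue under mfc_unres_lock; ->stop releases
 * whichever lock is still held.
 */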
2578static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2579 struct ipmr_mfc_iter *it, loff_t pos)
2580{
2581 struct mr_table *mrt = it->mrt;
2582 struct mfc_cache *mfc;
2583
2584 rcu_read_lock();
2585 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2586 it->cache = &mrt->mfc_cache_array[it->ct];
2587 list_for_each_entry_rcu(mfc, it->cache, list)
2588 if (pos-- == 0)
2589 return mfc;
2590 }
2591 rcu_read_unlock();
2592
2593 spin_lock_bh(&mfc_unres_lock);
2594 it->cache = &mrt->mfc_unres_queue;
2595 list_for_each_entry(mfc, it->cache, list)
2596 if (pos-- == 0)
2597 return mfc;
2598 spin_unlock_bh(&mfc_unres_lock);
2599
2600 it->cache = NULL;
2601 return NULL;
2602}
2603
2604
2605static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2606{
2607 struct ipmr_mfc_iter *it = seq->private;
2608 struct net *net = seq_file_net(seq);
2609 struct mr_table *mrt;
2610
2611 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2612 if (!mrt)
2613 return ERR_PTR(-ENOENT);
2614
2615 it->mrt = mrt;
2616 it->cache = NULL;
2617 it->ct = 0;
2618 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2619 : SEQ_START_TOKEN;
2620}
2621
2622static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2623{
2624 struct mfc_cache *mfc = v;
2625 struct ipmr_mfc_iter *it = seq->private;
2626 struct net *net = seq_file_net(seq);
2627 struct mr_table *mrt = it->mrt;
2628
2629 ++*pos;
2630
2631 if (v == SEQ_START_TOKEN)
2632 return ipmr_mfc_seq_idx(net, seq->private, 0);
2633
2634 if (mfc->list.next != it->cache)
2635 return list_entry(mfc->list.next, struct mfc_cache, list);
2636
2637 if (it->cache == &mrt->mfc_unres_queue)
2638 goto end_of_list;
2639
2640 BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2641
2642 while (++it->ct < MFC_LINES) {
2643 it->cache = &mrt->mfc_cache_array[it->ct];
2644 if (list_empty(it->cache))
2645 continue;
2646 return list_first_entry(it->cache, struct mfc_cache, list);
2647 }
2648
2649 /* exhausted cache_array, show unresolved */
2650 rcu_read_unlock();
2651 it->cache = &mrt->mfc_unres_queue;
2652 it->ct = 0;
2653
2654 spin_lock_bh(&mfc_unres_lock);
2655 if (!list_empty(it->cache))
2656 return list_first_entry(it->cache, struct mfc_cache, list);
2657
2658end_of_list:
2659 spin_unlock_bh(&mfc_unres_lock);
2660 it->cache = NULL;
2661
2662 return NULL;
2663}
2664
2665static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2666{
2667 struct ipmr_mfc_iter *it = seq->private;
2668 struct mr_table *mrt = it->mrt;
2669
2670 if (it->cache == &mrt->mfc_unres_queue)
2671 spin_unlock_bh(&mfc_unres_lock);
2672 else if (it->cache == &mrt->mfc_cache_array[it->ct])
2673 rcu_read_unlock();
2674}
2675
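/* One line per cache entry: group and origin in hex, the input vif,
 * packet/byte/wrong-interface counters, then an "oif:ttl" pair for each
 * forwarding interface.
 */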
2676static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2677{
2678 int n;
2679
2680 if (v == SEQ_START_TOKEN) {
2681 seq_puts(seq,
2682 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
2683 } else {
2684 const struct mfc_cache *mfc = v;
2685 const struct ipmr_mfc_iter *it = seq->private;
2686 const struct mr_table *mrt = it->mrt;
2687
2688 seq_printf(seq, "%08X %08X %-3hd",
2689 (__force u32) mfc->mfc_mcastgrp,
2690 (__force u32) mfc->mfc_origin,
2691 mfc->mfc_parent);
2692
2693 if (it->cache != &mrt->mfc_unres_queue) {
2694 seq_printf(seq, " %8lu %8lu %8lu",
2695 mfc->mfc_un.res.pkt,
2696 mfc->mfc_un.res.bytes,
2697 mfc->mfc_un.res.wrong_if);
2698 for (n = mfc->mfc_un.res.minvif;
2699 n < mfc->mfc_un.res.maxvif; n++) {
2700 if (VIF_EXISTS(mrt, n) &&
2701 mfc->mfc_un.res.ttls[n] < 255)
2702 seq_printf(seq,
2703 " %2d:%-3d",
2704 n, mfc->mfc_un.res.ttls[n]);
2705 }
2706 } else {
2707 /* unresolved mfc_caches don't contain
2708 * pkt, bytes and wrong_if values
2709 */
2710 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2711 }
2712 seq_putc(seq, '\n');
2713 }
2714 return 0;
2715}
2716
2717static const struct seq_operations ipmr_mfc_seq_ops = {
2718 .start = ipmr_mfc_seq_start,
2719 .next = ipmr_mfc_seq_next,
2720 .stop = ipmr_mfc_seq_stop,
2721 .show = ipmr_mfc_seq_show,
2722};
2723
2724static int ipmr_mfc_open(struct inode *inode, struct file *file)
2725{
2726 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2727 sizeof(struct ipmr_mfc_iter));
2728}
2729
2730static const struct file_operations ipmr_mfc_fops = {
2731 .owner = THIS_MODULE,
2732 .open = ipmr_mfc_open,
2733 .read = seq_read,
2734 .llseek = seq_lseek,
2735 .release = seq_release_net,
2736};
2737#endif
2738
2739#ifdef CONFIG_IP_PIMSM_V2
2740static const struct net_protocol pim_protocol = {
2741 .handler = pim_rcv,
2742 .netns_ok = 1,
2743};
2744#endif
2745
2746/* Setup for IP multicast routing */
2747static int __net_init ipmr_net_init(struct net *net)
2748{
2749 int err;
2750
2751 err = ipmr_rules_init(net);
2752 if (err < 0)
2753 goto fail;
2754
2755#ifdef CONFIG_PROC_FS
2756 err = -ENOMEM;
2757 if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
2758 goto proc_vif_fail;
2759 if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
2760 goto proc_cache_fail;
2761#endif
2762 return 0;
2763
2764#ifdef CONFIG_PROC_FS
2765proc_cache_fail:
2766 remove_proc_entry("ip_mr_vif", net->proc_net);
2767proc_vif_fail:
2768 ipmr_rules_exit(net);
2769#endif
2770fail:
2771 return err;
2772}
2773
2774static void __net_exit ipmr_net_exit(struct net *net)
2775{
2776#ifdef CONFIG_PROC_FS
2777 remove_proc_entry("ip_mr_cache", net->proc_net);
2778 remove_proc_entry("ip_mr_vif", net->proc_net);
2779#endif
2780 ipmr_rules_exit(net);
2781}
2782
2783static struct pernet_operations ipmr_net_ops = {
2784 .init = ipmr_net_init,
2785 .exit = ipmr_net_exit,
2786};
2787
2788int __init ip_mr_init(void)
2789{
2790 int err;
2791
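	/* SLAB_PANIC: cache creation either succeeds or panics the boot,
	 * so the result needs no NULL check.
	 */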
2792 mrt_cachep = kmem_cache_create("ip_mrt_cache",
2793 sizeof(struct mfc_cache),
2794 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2795 NULL);
2796
2797 err = register_pernet_subsys(&ipmr_net_ops);
2798 if (err)
2799 goto reg_pernet_fail;
2800
2801 err = register_netdevice_notifier(&ip_mr_notifier);
2802 if (err)
2803 goto reg_notif_fail;
2804#ifdef CONFIG_IP_PIMSM_V2
2805 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2806 pr_err("%s: can't add PIM protocol\n", __func__);
2807 err = -EAGAIN;
2808 goto add_proto_fail;
2809 }
2810#endif
2811 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2812 NULL, ipmr_rtm_dumproute, NULL);
2813 rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
2814 ipmr_rtm_route, NULL, NULL);
2815 rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
2816 ipmr_rtm_route, NULL, NULL);
2817 return 0;
2818
2819#ifdef CONFIG_IP_PIMSM_V2
2820add_proto_fail:
2821 unregister_netdevice_notifier(&ip_mr_notifier);
2822#endif
2823reg_notif_fail:
2824 unregister_pernet_subsys(&ipmr_net_ops);
2825reg_pernet_fail:
2826 kmem_cache_destroy(mrt_cachep);
2827 return err;
2828}
1/*
2 * IP multicast routing support for mrouted 3.6/3.8
3 *
4 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 * Linux Consultancy and Custom Driver Development
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Fixes:
13 * Michael Chastain : Incorrect size of copying.
14 * Alan Cox : Added the cache manager code
15 * Alan Cox : Fixed the clone/copy bug and device race.
16 * Mike McLagan : Routing by source
17 * Malcolm Beattie : Buffer handling fixes.
18 * Alexey Kuznetsov : Double buffer free and other fixes.
19 * SVR Anand : Fixed several multicast bugs and problems.
20 * Alexey Kuznetsov : Status, optimisations and more.
21 * Brad Parker : Better behaviour on mrouted upcall
22 * overflow.
23 * Carlos Picoto : PIMv1 Support
24 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
25 * Relax this requirement to work with older peers.
26 *
27 */
28
29#include <asm/uaccess.h>
30#include <linux/types.h>
31#include <linux/capability.h>
32#include <linux/errno.h>
33#include <linux/timer.h>
34#include <linux/mm.h>
35#include <linux/kernel.h>
36#include <linux/fcntl.h>
37#include <linux/stat.h>
38#include <linux/socket.h>
39#include <linux/in.h>
40#include <linux/inet.h>
41#include <linux/netdevice.h>
42#include <linux/inetdevice.h>
43#include <linux/igmp.h>
44#include <linux/proc_fs.h>
45#include <linux/seq_file.h>
46#include <linux/mroute.h>
47#include <linux/init.h>
48#include <linux/if_ether.h>
49#include <linux/slab.h>
50#include <net/net_namespace.h>
51#include <net/ip.h>
52#include <net/protocol.h>
53#include <linux/skbuff.h>
54#include <net/route.h>
55#include <net/sock.h>
56#include <net/icmp.h>
57#include <net/udp.h>
58#include <net/raw.h>
59#include <linux/notifier.h>
60#include <linux/if_arp.h>
61#include <linux/netfilter_ipv4.h>
62#include <linux/compat.h>
63#include <linux/export.h>
64#include <net/ipip.h>
65#include <net/checksum.h>
66#include <net/netlink.h>
67#include <net/fib_rules.h>
68
69#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
70#define CONFIG_IP_PIMSM 1
71#endif
72
73struct mr_table {
74 struct list_head list;
75#ifdef CONFIG_NET_NS
76 struct net *net;
77#endif
78 u32 id;
79 struct sock __rcu *mroute_sk;
80 struct timer_list ipmr_expire_timer;
81 struct list_head mfc_unres_queue;
82 struct list_head mfc_cache_array[MFC_LINES];
83 struct vif_device vif_table[MAXVIFS];
84 int maxvif;
85 atomic_t cache_resolve_queue_len;
86 int mroute_do_assert;
87 int mroute_do_pim;
88#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
89 int mroute_reg_vif_num;
90#endif
91};
92
93struct ipmr_rule {
94 struct fib_rule common;
95};
96
97struct ipmr_result {
98 struct mr_table *mrt;
99};
100
101/* Big lock, protecting vif table, mrt cache and mroute socket state.
102 * Note that the changes are semaphored via rtnl_lock.
103 */
104
105static DEFINE_RWLOCK(mrt_lock);
106
107/*
108 * Multicast router control variables
109 */
110
111#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
112
113/* Special spinlock for queue of unresolved entries */
114static DEFINE_SPINLOCK(mfc_unres_lock);
115
116/* We return to original Alan's scheme. Hash table of resolved
117 * entries is changed only in process context and protected
118 * with weak lock mrt_lock. Queue of unresolved entries is protected
119 * with strong spinlock mfc_unres_lock.
120 *
121 * In this case data path is free of exclusive locks at all.
122 */
123
124static struct kmem_cache *mrt_cachep __read_mostly;
125
126static struct mr_table *ipmr_new_table(struct net *net, u32 id);
127static void ipmr_free_table(struct mr_table *mrt);
128
129static int ip_mr_forward(struct net *net, struct mr_table *mrt,
130 struct sk_buff *skb, struct mfc_cache *cache,
131 int local);
132static int ipmr_cache_report(struct mr_table *mrt,
133 struct sk_buff *pkt, vifi_t vifi, int assert);
134static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
135 struct mfc_cache *c, struct rtmsg *rtm);
136static void mroute_clean_tables(struct mr_table *mrt);
137static void ipmr_expire_process(unsigned long arg);
138
139#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
140#define ipmr_for_each_table(mrt, net) \
141 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
142
143static struct mr_table *ipmr_get_table(struct net *net, u32 id)
144{
145 struct mr_table *mrt;
146
147 ipmr_for_each_table(mrt, net) {
148 if (mrt->id == id)
149 return mrt;
150 }
151 return NULL;
152}
153
154static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
155 struct mr_table **mrt)
156{
157 struct ipmr_result res;
158 struct fib_lookup_arg arg = { .result = &res, };
159 int err;
160
161 err = fib_rules_lookup(net->ipv4.mr_rules_ops,
162 flowi4_to_flowi(flp4), 0, &arg);
163 if (err < 0)
164 return err;
165 *mrt = res.mrt;
166 return 0;
167}
168
169static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
170 int flags, struct fib_lookup_arg *arg)
171{
172 struct ipmr_result *res = arg->result;
173 struct mr_table *mrt;
174
175 switch (rule->action) {
176 case FR_ACT_TO_TBL:
177 break;
178 case FR_ACT_UNREACHABLE:
179 return -ENETUNREACH;
180 case FR_ACT_PROHIBIT:
181 return -EACCES;
182 case FR_ACT_BLACKHOLE:
183 default:
184 return -EINVAL;
185 }
186
187 mrt = ipmr_get_table(rule->fr_net, rule->table);
188 if (mrt == NULL)
189 return -EAGAIN;
190 res->mrt = mrt;
191 return 0;
192}
193
194static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
195{
196 return 1;
197}
198
199static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
200 FRA_GENERIC_POLICY,
201};
202
203static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
204 struct fib_rule_hdr *frh, struct nlattr **tb)
205{
206 return 0;
207}
208
209static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
210 struct nlattr **tb)
211{
212 return 1;
213}
214
215static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
216 struct fib_rule_hdr *frh)
217{
218 frh->dst_len = 0;
219 frh->src_len = 0;
220 frh->tos = 0;
221 return 0;
222}
223
224static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
225 .family = RTNL_FAMILY_IPMR,
226 .rule_size = sizeof(struct ipmr_rule),
227 .addr_size = sizeof(u32),
228 .action = ipmr_rule_action,
229 .match = ipmr_rule_match,
230 .configure = ipmr_rule_configure,
231 .compare = ipmr_rule_compare,
232 .default_pref = fib_default_rule_pref,
233 .fill = ipmr_rule_fill,
234 .nlgroup = RTNLGRP_IPV4_RULE,
235 .policy = ipmr_rule_policy,
236 .owner = THIS_MODULE,
237};
238
239static int __net_init ipmr_rules_init(struct net *net)
240{
241 struct fib_rules_ops *ops;
242 struct mr_table *mrt;
243 int err;
244
245 ops = fib_rules_register(&ipmr_rules_ops_template, net);
246 if (IS_ERR(ops))
247 return PTR_ERR(ops);
248
249 INIT_LIST_HEAD(&net->ipv4.mr_tables);
250
251 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
252 if (mrt == NULL) {
253 err = -ENOMEM;
254 goto err1;
255 }
256
257 err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
258 if (err < 0)
259 goto err2;
260
261 net->ipv4.mr_rules_ops = ops;
262 return 0;
263
264err2:
265 kfree(mrt);
266err1:
267 fib_rules_unregister(ops);
268 return err;
269}
270
271static void __net_exit ipmr_rules_exit(struct net *net)
272{
273 struct mr_table *mrt, *next;
274
275 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
276 list_del(&mrt->list);
277 ipmr_free_table(mrt);
278 }
279 fib_rules_unregister(net->ipv4.mr_rules_ops);
280}
281#else
282#define ipmr_for_each_table(mrt, net) \
283 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
284
285static struct mr_table *ipmr_get_table(struct net *net, u32 id)
286{
287 return net->ipv4.mrt;
288}
289
290static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
291 struct mr_table **mrt)
292{
293 *mrt = net->ipv4.mrt;
294 return 0;
295}
296
297static int __net_init ipmr_rules_init(struct net *net)
298{
299 net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
300 return net->ipv4.mrt ? 0 : -ENOMEM;
301}
302
303static void __net_exit ipmr_rules_exit(struct net *net)
304{
305 ipmr_free_table(net->ipv4.mrt);
306}
307#endif
308
309static struct mr_table *ipmr_new_table(struct net *net, u32 id)
310{
311 struct mr_table *mrt;
312 unsigned int i;
313
314 mrt = ipmr_get_table(net, id);
315 if (mrt != NULL)
316 return mrt;
317
318 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
319 if (mrt == NULL)
320 return NULL;
321 write_pnet(&mrt->net, net);
322 mrt->id = id;
323
324 /* Forwarding cache */
325 for (i = 0; i < MFC_LINES; i++)
326 INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
327
328 INIT_LIST_HEAD(&mrt->mfc_unres_queue);
329
330 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
331 (unsigned long)mrt);
332
333#ifdef CONFIG_IP_PIMSM
334 mrt->mroute_reg_vif_num = -1;
335#endif
336#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
337 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
338#endif
339 return mrt;
340}
341
342static void ipmr_free_table(struct mr_table *mrt)
343{
344 del_timer_sync(&mrt->ipmr_expire_timer);
345 mroute_clean_tables(mrt);
346 kfree(mrt);
347}
348
349/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
350
351static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
352{
353 struct net *net = dev_net(dev);
354
355 dev_close(dev);
356
357 dev = __dev_get_by_name(net, "tunl0");
358 if (dev) {
359 const struct net_device_ops *ops = dev->netdev_ops;
360 struct ifreq ifr;
361 struct ip_tunnel_parm p;
362
363 memset(&p, 0, sizeof(p));
364 p.iph.daddr = v->vifc_rmt_addr.s_addr;
365 p.iph.saddr = v->vifc_lcl_addr.s_addr;
366 p.iph.version = 4;
367 p.iph.ihl = 5;
368 p.iph.protocol = IPPROTO_IPIP;
369 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
370 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
371
372 if (ops->ndo_do_ioctl) {
373 mm_segment_t oldfs = get_fs();
374
375 set_fs(KERNEL_DS);
376 ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
377 set_fs(oldfs);
378 }
379 }
380}
381
382static
383struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
384{
385 struct net_device *dev;
386
387 dev = __dev_get_by_name(net, "tunl0");
388
389 if (dev) {
390 const struct net_device_ops *ops = dev->netdev_ops;
391 int err;
392 struct ifreq ifr;
393 struct ip_tunnel_parm p;
394 struct in_device *in_dev;
395
396 memset(&p, 0, sizeof(p));
397 p.iph.daddr = v->vifc_rmt_addr.s_addr;
398 p.iph.saddr = v->vifc_lcl_addr.s_addr;
399 p.iph.version = 4;
400 p.iph.ihl = 5;
401 p.iph.protocol = IPPROTO_IPIP;
402 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
403 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
404
405 if (ops->ndo_do_ioctl) {
406 mm_segment_t oldfs = get_fs();
407
408 set_fs(KERNEL_DS);
409 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
410 set_fs(oldfs);
411 } else {
412 err = -EOPNOTSUPP;
413 }
414 dev = NULL;
415
416 if (err == 0 &&
417 (dev = __dev_get_by_name(net, p.name)) != NULL) {
418 dev->flags |= IFF_MULTICAST;
419
420 in_dev = __in_dev_get_rtnl(dev);
421 if (in_dev == NULL)
422 goto failure;
423
424 ipv4_devconf_setall(in_dev);
425 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
426
427 if (dev_open(dev))
428 goto failure;
429 dev_hold(dev);
430 }
431 }
432 return dev;
433
434failure:
435 /* allow the register to be completed before unregistering. */
436 rtnl_unlock();
437 rtnl_lock();
438
439 unregister_netdevice(dev);
440 return NULL;
441}
442
443#ifdef CONFIG_IP_PIMSM
444
445static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
446{
447 struct net *net = dev_net(dev);
448 struct mr_table *mrt;
449 struct flowi4 fl4 = {
450 .flowi4_oif = dev->ifindex,
451 .flowi4_iif = skb->skb_iif,
452 .flowi4_mark = skb->mark,
453 };
454 int err;
455
456 err = ipmr_fib_lookup(net, &fl4, &mrt);
457 if (err < 0) {
458 kfree_skb(skb);
459 return err;
460 }
461
462 read_lock(&mrt_lock);
463 dev->stats.tx_bytes += skb->len;
464 dev->stats.tx_packets++;
465 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
466 read_unlock(&mrt_lock);
467 kfree_skb(skb);
468 return NETDEV_TX_OK;
469}
470
471static const struct net_device_ops reg_vif_netdev_ops = {
472 .ndo_start_xmit = reg_vif_xmit,
473};
474
475static void reg_vif_setup(struct net_device *dev)
476{
477 dev->type = ARPHRD_PIMREG;
478 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
479 dev->flags = IFF_NOARP;
480 dev->netdev_ops = ®_vif_netdev_ops,
481 dev->destructor = free_netdev;
482 dev->features |= NETIF_F_NETNS_LOCAL;
483}
484
485static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
486{
487 struct net_device *dev;
488 struct in_device *in_dev;
489 char name[IFNAMSIZ];
490
491 if (mrt->id == RT_TABLE_DEFAULT)
492 sprintf(name, "pimreg");
493 else
494 sprintf(name, "pimreg%u", mrt->id);
495
496 dev = alloc_netdev(0, name, reg_vif_setup);
497
498 if (dev == NULL)
499 return NULL;
500
501 dev_net_set(dev, net);
502
503 if (register_netdevice(dev)) {
504 free_netdev(dev);
505 return NULL;
506 }
507 dev->iflink = 0;
508
509 rcu_read_lock();
510 in_dev = __in_dev_get_rcu(dev);
511 if (!in_dev) {
512 rcu_read_unlock();
513 goto failure;
514 }
515
516 ipv4_devconf_setall(in_dev);
517 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
518 rcu_read_unlock();
519
520 if (dev_open(dev))
521 goto failure;
522
523 dev_hold(dev);
524
525 return dev;
526
527failure:
528 /* allow the register to be completed before unregistering. */
529 rtnl_unlock();
530 rtnl_lock();
531
532 unregister_netdevice(dev);
533 return NULL;
534}
535#endif
536
537/*
538 * Delete a VIF entry
539 * @notify: Set to 1, if the caller is a notifier_call
540 */
541
542static int vif_delete(struct mr_table *mrt, int vifi, int notify,
543 struct list_head *head)
544{
545 struct vif_device *v;
546 struct net_device *dev;
547 struct in_device *in_dev;
548
549 if (vifi < 0 || vifi >= mrt->maxvif)
550 return -EADDRNOTAVAIL;
551
552 v = &mrt->vif_table[vifi];
553
554 write_lock_bh(&mrt_lock);
555 dev = v->dev;
556 v->dev = NULL;
557
558 if (!dev) {
559 write_unlock_bh(&mrt_lock);
560 return -EADDRNOTAVAIL;
561 }
562
563#ifdef CONFIG_IP_PIMSM
564 if (vifi == mrt->mroute_reg_vif_num)
565 mrt->mroute_reg_vif_num = -1;
566#endif
567
568 if (vifi + 1 == mrt->maxvif) {
569 int tmp;
570
571 for (tmp = vifi - 1; tmp >= 0; tmp--) {
572 if (VIF_EXISTS(mrt, tmp))
573 break;
574 }
575 mrt->maxvif = tmp+1;
576 }
577
578 write_unlock_bh(&mrt_lock);
579
580 dev_set_allmulti(dev, -1);
581
582 in_dev = __in_dev_get_rtnl(dev);
583 if (in_dev) {
584 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
585 ip_rt_multicast_event(in_dev);
586 }
587
588 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
589 unregister_netdevice_queue(dev, head);
590
591 dev_put(dev);
592 return 0;
593}
594
595static void ipmr_cache_free_rcu(struct rcu_head *head)
596{
597 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
598
599 kmem_cache_free(mrt_cachep, c);
600}
601
602static inline void ipmr_cache_free(struct mfc_cache *c)
603{
604 call_rcu(&c->rcu, ipmr_cache_free_rcu);
605}
606
607/* Destroy an unresolved cache entry, killing queued skbs
608 * and reporting error to netlink readers.
609 */
610
611static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
612{
613 struct net *net = read_pnet(&mrt->net);
614 struct sk_buff *skb;
615 struct nlmsgerr *e;
616
617 atomic_dec(&mrt->cache_resolve_queue_len);
618
619 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
620 if (ip_hdr(skb)->version == 0) {
621 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
622 nlh->nlmsg_type = NLMSG_ERROR;
623 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
624 skb_trim(skb, nlh->nlmsg_len);
625 e = NLMSG_DATA(nlh);
626 e->error = -ETIMEDOUT;
627 memset(&e->msg, 0, sizeof(e->msg));
628
629 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
630 } else {
631 kfree_skb(skb);
632 }
633 }
634
635 ipmr_cache_free(c);
636}
637
638
639/* Timer process for the unresolved queue. */
640
641static void ipmr_expire_process(unsigned long arg)
642{
643 struct mr_table *mrt = (struct mr_table *)arg;
644 unsigned long now;
645 unsigned long expires;
646 struct mfc_cache *c, *next;
647
648 if (!spin_trylock(&mfc_unres_lock)) {
649 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
650 return;
651 }
652
653 if (list_empty(&mrt->mfc_unres_queue))
654 goto out;
655
656 now = jiffies;
657 expires = 10*HZ;
658
659 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
660 if (time_after(c->mfc_un.unres.expires, now)) {
661 unsigned long interval = c->mfc_un.unres.expires - now;
662 if (interval < expires)
663 expires = interval;
664 continue;
665 }
666
667 list_del(&c->list);
668 ipmr_destroy_unres(mrt, c);
669 }
670
671 if (!list_empty(&mrt->mfc_unres_queue))
672 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
673
674out:
675 spin_unlock(&mfc_unres_lock);
676}
677
678/* Fill oifs list. It is called under write locked mrt_lock. */
679
680static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
681 unsigned char *ttls)
682{
683 int vifi;
684
685 cache->mfc_un.res.minvif = MAXVIFS;
686 cache->mfc_un.res.maxvif = 0;
687 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
688
689 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
690 if (VIF_EXISTS(mrt, vifi) &&
691 ttls[vifi] && ttls[vifi] < 255) {
692 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
693 if (cache->mfc_un.res.minvif > vifi)
694 cache->mfc_un.res.minvif = vifi;
695 if (cache->mfc_un.res.maxvif <= vifi)
696 cache->mfc_un.res.maxvif = vifi + 1;
697 }
698 }
699}
700
701static int vif_add(struct net *net, struct mr_table *mrt,
702 struct vifctl *vifc, int mrtsock)
703{
704 int vifi = vifc->vifc_vifi;
705 struct vif_device *v = &mrt->vif_table[vifi];
706 struct net_device *dev;
707 struct in_device *in_dev;
708 int err;
709
710 /* Is vif busy ? */
711 if (VIF_EXISTS(mrt, vifi))
712 return -EADDRINUSE;
713
714 switch (vifc->vifc_flags) {
715#ifdef CONFIG_IP_PIMSM
716 case VIFF_REGISTER:
717 /*
718 * Special Purpose VIF in PIM
719 * All the packets will be sent to the daemon
720 */
721 if (mrt->mroute_reg_vif_num >= 0)
722 return -EADDRINUSE;
723 dev = ipmr_reg_vif(net, mrt);
724 if (!dev)
725 return -ENOBUFS;
726 err = dev_set_allmulti(dev, 1);
727 if (err) {
728 unregister_netdevice(dev);
729 dev_put(dev);
730 return err;
731 }
732 break;
733#endif
734 case VIFF_TUNNEL:
735 dev = ipmr_new_tunnel(net, vifc);
736 if (!dev)
737 return -ENOBUFS;
738 err = dev_set_allmulti(dev, 1);
739 if (err) {
740 ipmr_del_tunnel(dev, vifc);
741 dev_put(dev);
742 return err;
743 }
744 break;
745
746 case VIFF_USE_IFINDEX:
747 case 0:
748 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
749 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
750 if (dev && __in_dev_get_rtnl(dev) == NULL) {
751 dev_put(dev);
752 return -EADDRNOTAVAIL;
753 }
754 } else {
755 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
756 }
757 if (!dev)
758 return -EADDRNOTAVAIL;
759 err = dev_set_allmulti(dev, 1);
760 if (err) {
761 dev_put(dev);
762 return err;
763 }
764 break;
765 default:
766 return -EINVAL;
767 }
768
769 in_dev = __in_dev_get_rtnl(dev);
770 if (!in_dev) {
771 dev_put(dev);
772 return -EADDRNOTAVAIL;
773 }
774 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
775 ip_rt_multicast_event(in_dev);
776
777 /* Fill in the VIF structures */
778
779 v->rate_limit = vifc->vifc_rate_limit;
780 v->local = vifc->vifc_lcl_addr.s_addr;
781 v->remote = vifc->vifc_rmt_addr.s_addr;
782 v->flags = vifc->vifc_flags;
783 if (!mrtsock)
784 v->flags |= VIFF_STATIC;
785 v->threshold = vifc->vifc_threshold;
786 v->bytes_in = 0;
787 v->bytes_out = 0;
788 v->pkt_in = 0;
789 v->pkt_out = 0;
790 v->link = dev->ifindex;
791 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
792 v->link = dev->iflink;
793
794 /* And finish update writing critical data */
795 write_lock_bh(&mrt_lock);
796 v->dev = dev;
797#ifdef CONFIG_IP_PIMSM
798 if (v->flags & VIFF_REGISTER)
799 mrt->mroute_reg_vif_num = vifi;
800#endif
801 if (vifi+1 > mrt->maxvif)
802 mrt->maxvif = vifi+1;
803 write_unlock_bh(&mrt_lock);
804 return 0;
805}
806
807/* called with rcu_read_lock() */
808static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
809 __be32 origin,
810 __be32 mcastgrp)
811{
812 int line = MFC_HASH(mcastgrp, origin);
813 struct mfc_cache *c;
814
815 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
816 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
817 return c;
818 }
819 return NULL;
820}
821
822/*
823 * Allocate a multicast cache entry
824 */
825static struct mfc_cache *ipmr_cache_alloc(void)
826{
827 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
828
829 if (c)
830 c->mfc_un.res.minvif = MAXVIFS;
831 return c;
832}
833
834static struct mfc_cache *ipmr_cache_alloc_unres(void)
835{
836 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
837
838 if (c) {
839 skb_queue_head_init(&c->mfc_un.unres.unresolved);
840 c->mfc_un.unres.expires = jiffies + 10*HZ;
841 }
842 return c;
843}
844
845/*
846 * A cache entry has gone into a resolved state from queued
847 */
848
849static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
850 struct mfc_cache *uc, struct mfc_cache *c)
851{
852 struct sk_buff *skb;
853 struct nlmsgerr *e;
854
855 /* Play the pending entries through our router */
856
857 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
858 if (ip_hdr(skb)->version == 0) {
859 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
860
861 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
862 nlh->nlmsg_len = skb_tail_pointer(skb) -
863 (u8 *)nlh;
864 } else {
865 nlh->nlmsg_type = NLMSG_ERROR;
866 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
867 skb_trim(skb, nlh->nlmsg_len);
868 e = NLMSG_DATA(nlh);
869 e->error = -EMSGSIZE;
870 memset(&e->msg, 0, sizeof(e->msg));
871 }
872
873 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
874 } else {
875 ip_mr_forward(net, mrt, skb, c, 0);
876 }
877 }
878}
879
880/*
881 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
882 * expects the following bizarre scheme.
883 *
884 * Called under mrt_lock.
885 */
886
887static int ipmr_cache_report(struct mr_table *mrt,
888 struct sk_buff *pkt, vifi_t vifi, int assert)
889{
890 struct sk_buff *skb;
891 const int ihl = ip_hdrlen(pkt);
892 struct igmphdr *igmp;
893 struct igmpmsg *msg;
894 struct sock *mroute_sk;
895 int ret;
896
897#ifdef CONFIG_IP_PIMSM
898 if (assert == IGMPMSG_WHOLEPKT)
899 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
900 else
901#endif
902 skb = alloc_skb(128, GFP_ATOMIC);
903
904 if (!skb)
905 return -ENOBUFS;
906
907#ifdef CONFIG_IP_PIMSM
908 if (assert == IGMPMSG_WHOLEPKT) {
909 /* Ugly, but we have no choice with this interface.
910 * Duplicate old header, fix ihl, length etc.
911 * And all this only to mangle msg->im_msgtype and
912 * to set msg->im_mbz to "mbz" :-)
913 */
914 skb_push(skb, sizeof(struct iphdr));
915 skb_reset_network_header(skb);
916 skb_reset_transport_header(skb);
917 msg = (struct igmpmsg *)skb_network_header(skb);
918 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
919 msg->im_msgtype = IGMPMSG_WHOLEPKT;
920 msg->im_mbz = 0;
921 msg->im_vif = mrt->mroute_reg_vif_num;
922 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
923 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
924 sizeof(struct iphdr));
925 } else
926#endif
927 {
928
929 /* Copy the IP header */
930
931 skb->network_header = skb->tail;
932 skb_put(skb, ihl);
933 skb_copy_to_linear_data(skb, pkt->data, ihl);
934 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
935 msg = (struct igmpmsg *)skb_network_header(skb);
936 msg->im_vif = vifi;
937 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
938
939 /* Add our header */
940
941 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
942 igmp->type =
943 msg->im_msgtype = assert;
944 igmp->code = 0;
945 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
946 skb->transport_header = skb->network_header;
947 }
948
949 rcu_read_lock();
950 mroute_sk = rcu_dereference(mrt->mroute_sk);
951 if (mroute_sk == NULL) {
952 rcu_read_unlock();
953 kfree_skb(skb);
954 return -EINVAL;
955 }
956
957 /* Deliver to mrouted */
958
959 ret = sock_queue_rcv_skb(mroute_sk, skb);
960 rcu_read_unlock();
961 if (ret < 0) {
962 net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
963 kfree_skb(skb);
964 }
965
966 return ret;
967}
968
969/*
970 * Queue a packet for resolution. It gets locked cache entry!
971 */
972
973static int
974ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
975{
976 bool found = false;
977 int err;
978 struct mfc_cache *c;
979 const struct iphdr *iph = ip_hdr(skb);
980
981 spin_lock_bh(&mfc_unres_lock);
982 list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
983 if (c->mfc_mcastgrp == iph->daddr &&
984 c->mfc_origin == iph->saddr) {
985 found = true;
986 break;
987 }
988 }
989
990 if (!found) {
991 /* Create a new entry if allowable */
992
993 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
994 (c = ipmr_cache_alloc_unres()) == NULL) {
995 spin_unlock_bh(&mfc_unres_lock);
996
997 kfree_skb(skb);
998 return -ENOBUFS;
999 }
1000
1001 /* Fill in the new cache entry */
1002
1003 c->mfc_parent = -1;
1004 c->mfc_origin = iph->saddr;
1005 c->mfc_mcastgrp = iph->daddr;
1006
1007 /* Reflect first query at mrouted. */
1008
1009 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1010 if (err < 0) {
1011 /* If the report failed throw the cache entry
1012 out - Brad Parker
1013 */
1014 spin_unlock_bh(&mfc_unres_lock);
1015
1016 ipmr_cache_free(c);
1017 kfree_skb(skb);
1018 return err;
1019 }
1020
1021 atomic_inc(&mrt->cache_resolve_queue_len);
1022 list_add(&c->list, &mrt->mfc_unres_queue);
1023
1024 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1025 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
1026 }
1027
1028 /* See if we can append the packet */
1029
1030 if (c->mfc_un.unres.unresolved.qlen > 3) {
1031 kfree_skb(skb);
1032 err = -ENOBUFS;
1033 } else {
1034 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1035 err = 0;
1036 }
1037
1038 spin_unlock_bh(&mfc_unres_lock);
1039 return err;
1040}
1041
1042/*
1043 * MFC cache manipulation by user space mroute daemon
1044 */
1045
1046static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
1047{
1048 int line;
1049 struct mfc_cache *c, *next;
1050
1051 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1052
1053 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
1054 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
1055 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1056 list_del_rcu(&c->list);
1057
1058 ipmr_cache_free(c);
1059 return 0;
1060 }
1061 }
1062 return -ENOENT;
1063}
1064
1065static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1066 struct mfcctl *mfc, int mrtsock)
1067{
1068 bool found = false;
1069 int line;
1070 struct mfc_cache *uc, *c;
1071
1072 if (mfc->mfcc_parent >= MAXVIFS)
1073 return -ENFILE;
1074
1075 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1076
1077 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
1078 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
1079 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1080 found = true;
1081 break;
1082 }
1083 }
1084
1085 if (found) {
1086 write_lock_bh(&mrt_lock);
1087 c->mfc_parent = mfc->mfcc_parent;
1088 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1089 if (!mrtsock)
1090 c->mfc_flags |= MFC_STATIC;
1091 write_unlock_bh(&mrt_lock);
1092 return 0;
1093 }
1094
1095 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
1096 return -EINVAL;
1097
1098 c = ipmr_cache_alloc();
1099 if (c == NULL)
1100 return -ENOMEM;
1101
1102 c->mfc_origin = mfc->mfcc_origin.s_addr;
1103 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
1104 c->mfc_parent = mfc->mfcc_parent;
1105 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1106 if (!mrtsock)
1107 c->mfc_flags |= MFC_STATIC;
1108
1109 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
1110
1111 /*
1112 * Check to see if we resolved a queued list. If so we
1113 * need to send on the frames and tidy up.
1114 */
1115 found = false;
1116 spin_lock_bh(&mfc_unres_lock);
1117 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
1118 if (uc->mfc_origin == c->mfc_origin &&
1119 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
1120 list_del(&uc->list);
1121 atomic_dec(&mrt->cache_resolve_queue_len);
1122 found = true;
1123 break;
1124 }
1125 }
1126 if (list_empty(&mrt->mfc_unres_queue))
1127 del_timer(&mrt->ipmr_expire_timer);
1128 spin_unlock_bh(&mfc_unres_lock);
1129
1130 if (found) {
1131 ipmr_cache_resolve(net, mrt, uc, c);
1132 ipmr_cache_free(uc);
1133 }
1134 return 0;
1135}
1136
1137/*
1138 * Close the multicast socket, and clear the vif tables etc
1139 */
1140
1141static void mroute_clean_tables(struct mr_table *mrt)
1142{
1143 int i;
1144 LIST_HEAD(list);
1145 struct mfc_cache *c, *next;
1146
1147 /* Shut down all active vif entries */
1148
1149 for (i = 0; i < mrt->maxvif; i++) {
1150 if (!(mrt->vif_table[i].flags & VIFF_STATIC))
1151 vif_delete(mrt, i, 0, &list);
1152 }
1153 unregister_netdevice_many(&list);
1154
1155 /* Wipe the cache */
1156
1157 for (i = 0; i < MFC_LINES; i++) {
1158 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1159 if (c->mfc_flags & MFC_STATIC)
1160 continue;
1161 list_del_rcu(&c->list);
1162 ipmr_cache_free(c);
1163 }
1164 }
1165
1166 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1167 spin_lock_bh(&mfc_unres_lock);
1168 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
1169 list_del(&c->list);
1170 ipmr_destroy_unres(mrt, c);
1171 }
1172 spin_unlock_bh(&mfc_unres_lock);
1173 }
1174}
1175
1176/* called from ip_ra_control(), before an RCU grace period,
1177 * we dont need to call synchronize_rcu() here
1178 */
1179static void mrtsock_destruct(struct sock *sk)
1180{
1181 struct net *net = sock_net(sk);
1182 struct mr_table *mrt;
1183
1184 rtnl_lock();
1185 ipmr_for_each_table(mrt, net) {
1186 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1187 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1188 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1189 mroute_clean_tables(mrt);
1190 }
1191 }
1192 rtnl_unlock();
1193}
1194
1195/*
1196 * Socket options and virtual interface manipulation. The whole
1197 * virtual interface system is a complete heap, but unfortunately
1198 * that's how BSD mrouted happens to think. Maybe one day with a proper
1199 * MOSPF/PIM router set up we can clean this up.
1200 */
1201
1202int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1203{
1204 int ret;
1205 struct vifctl vif;
1206 struct mfcctl mfc;
1207 struct net *net = sock_net(sk);
1208 struct mr_table *mrt;
1209
1210 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1211 if (mrt == NULL)
1212 return -ENOENT;
1213
1214 if (optname != MRT_INIT) {
1215 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1216 !capable(CAP_NET_ADMIN))
1217 return -EACCES;
1218 }
1219
1220 switch (optname) {
1221 case MRT_INIT:
1222 if (sk->sk_type != SOCK_RAW ||
1223 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1224 return -EOPNOTSUPP;
1225 if (optlen != sizeof(int))
1226 return -ENOPROTOOPT;
1227
1228 rtnl_lock();
1229 if (rtnl_dereference(mrt->mroute_sk)) {
1230 rtnl_unlock();
1231 return -EADDRINUSE;
1232 }
1233
1234 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1235 if (ret == 0) {
1236 rcu_assign_pointer(mrt->mroute_sk, sk);
1237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1238 }
1239 rtnl_unlock();
1240 return ret;
1241 case MRT_DONE:
1242 if (sk != rcu_access_pointer(mrt->mroute_sk))
1243 return -EACCES;
1244 return ip_ra_control(sk, 0, NULL);
1245 case MRT_ADD_VIF:
1246 case MRT_DEL_VIF:
1247 if (optlen != sizeof(vif))
1248 return -EINVAL;
1249 if (copy_from_user(&vif, optval, sizeof(vif)))
1250 return -EFAULT;
1251 if (vif.vifc_vifi >= MAXVIFS)
1252 return -ENFILE;
1253 rtnl_lock();
1254 if (optname == MRT_ADD_VIF) {
1255 ret = vif_add(net, mrt, &vif,
1256 sk == rtnl_dereference(mrt->mroute_sk));
1257 } else {
1258 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1259 }
1260 rtnl_unlock();
1261 return ret;
1262
1263 /*
1264 * Manipulate the forwarding caches. These live
1265 * in a sort of kernel/user symbiosis.
1266 */
1267 case MRT_ADD_MFC:
1268 case MRT_DEL_MFC:
1269 if (optlen != sizeof(mfc))
1270 return -EINVAL;
1271 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1272 return -EFAULT;
1273 rtnl_lock();
1274 if (optname == MRT_DEL_MFC)
1275 ret = ipmr_mfc_delete(mrt, &mfc);
1276 else
1277 ret = ipmr_mfc_add(net, mrt, &mfc,
1278 sk == rtnl_dereference(mrt->mroute_sk));
1279 rtnl_unlock();
1280 return ret;
1281 /*
1282 * Control PIM assert.
1283 */
1284 case MRT_ASSERT:
1285 {
1286 int v;
1287 if (get_user(v, (int __user *)optval))
1288 return -EFAULT;
1289 mrt->mroute_do_assert = (v) ? 1 : 0;
1290 return 0;
1291 }
1292#ifdef CONFIG_IP_PIMSM
1293 case MRT_PIM:
1294 {
1295 int v;
1296
1297 if (get_user(v, (int __user *)optval))
1298 return -EFAULT;
1299 v = (v) ? 1 : 0;
1300
1301 rtnl_lock();
1302 ret = 0;
1303 if (v != mrt->mroute_do_pim) {
1304 mrt->mroute_do_pim = v;
1305 mrt->mroute_do_assert = v;
1306 }
1307 rtnl_unlock();
1308 return ret;
1309 }
1310#endif
1311#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
1312 case MRT_TABLE:
1313 {
1314 u32 v;
1315
1316 if (optlen != sizeof(u32))
1317 return -EINVAL;
1318 if (get_user(v, (u32 __user *)optval))
1319 return -EFAULT;
1320
1321 rtnl_lock();
1322 ret = 0;
1323 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1324 ret = -EBUSY;
1325 } else {
1326 if (!ipmr_new_table(net, v))
1327 ret = -ENOMEM;
1328 raw_sk(sk)->ipmr_table = v;
1329 }
1330 rtnl_unlock();
1331 return ret;
1332 }
1333#endif
1334 /*
1335 * Spurious command, or MRT_VERSION which you cannot
1336 * set.
1337 */
1338 default:
1339 return -ENOPROTOOPT;
1340 }
1341}
1342
1343/*
1344 * Getsock opt support for the multicast routing system.
1345 */
1346
1347int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1348{
1349 int olr;
1350 int val;
1351 struct net *net = sock_net(sk);
1352 struct mr_table *mrt;
1353
1354 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1355 if (mrt == NULL)
1356 return -ENOENT;
1357
1358 if (optname != MRT_VERSION &&
1359#ifdef CONFIG_IP_PIMSM
1360 optname != MRT_PIM &&
1361#endif
1362 optname != MRT_ASSERT)
1363 return -ENOPROTOOPT;
1364
1365 if (get_user(olr, optlen))
1366 return -EFAULT;
1367
1368 olr = min_t(unsigned int, olr, sizeof(int));
1369 if (olr < 0)
1370 return -EINVAL;
1371
1372 if (put_user(olr, optlen))
1373 return -EFAULT;
1374 if (optname == MRT_VERSION)
1375 val = 0x0305;
1376#ifdef CONFIG_IP_PIMSM
1377 else if (optname == MRT_PIM)
1378 val = mrt->mroute_do_pim;
1379#endif
1380 else
1381 val = mrt->mroute_do_assert;
1382 if (copy_to_user(optval, &val, olr))
1383 return -EFAULT;
1384 return 0;
1385}
1386
1387/*
1388 * The IP multicast ioctl support routines.
1389 */
1390
1391int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1392{
1393 struct sioc_sg_req sr;
1394 struct sioc_vif_req vr;
1395 struct vif_device *vif;
1396 struct mfc_cache *c;
1397 struct net *net = sock_net(sk);
1398 struct mr_table *mrt;
1399
1400 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1401 if (mrt == NULL)
1402 return -ENOENT;
1403
1404 switch (cmd) {
1405 case SIOCGETVIFCNT:
1406 if (copy_from_user(&vr, arg, sizeof(vr)))
1407 return -EFAULT;
1408 if (vr.vifi >= mrt->maxvif)
1409 return -EINVAL;
1410 read_lock(&mrt_lock);
1411 vif = &mrt->vif_table[vr.vifi];
1412 if (VIF_EXISTS(mrt, vr.vifi)) {
1413 vr.icount = vif->pkt_in;
1414 vr.ocount = vif->pkt_out;
1415 vr.ibytes = vif->bytes_in;
1416 vr.obytes = vif->bytes_out;
1417 read_unlock(&mrt_lock);
1418
1419 if (copy_to_user(arg, &vr, sizeof(vr)))
1420 return -EFAULT;
1421 return 0;
1422 }
1423 read_unlock(&mrt_lock);
1424 return -EADDRNOTAVAIL;
1425 case SIOCGETSGCNT:
1426 if (copy_from_user(&sr, arg, sizeof(sr)))
1427 return -EFAULT;
1428
1429 rcu_read_lock();
1430 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1431 if (c) {
1432 sr.pktcnt = c->mfc_un.res.pkt;
1433 sr.bytecnt = c->mfc_un.res.bytes;
1434 sr.wrong_if = c->mfc_un.res.wrong_if;
1435 rcu_read_unlock();
1436
1437 if (copy_to_user(arg, &sr, sizeof(sr)))
1438 return -EFAULT;
1439 return 0;
1440 }
1441 rcu_read_unlock();
1442 return -EADDRNOTAVAIL;
1443 default:
1444 return -ENOIOCTLCMD;
1445 }
1446}
1447
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 * Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed that
 * is so important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->frag_off = 0;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->protocol = IPPROTO_IPIP;
	iph->ihl = 5;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

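/* Resulting packet layout (sketch):
 *
 *	[ new outer IP header | original packet, headers untouched ]
 *
 * with the outer saddr/daddr taken from the vif's local/remote tunnel
 * addresses (see the VIFF_TUNNEL branch in ipmr_queue_xmit() below) and
 * TOS/TTL copied from the inner header.
 */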
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 * Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so oversized packets
		 * simply disappear into a black hole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * application, that application should receive packets regardless
	 * of which interface it has joined on.
	 * If we did not do this, the application would have to join on all
	 * interfaces. On the other hand, a multihomed host (or a router
	 * that is not an mrouter) must not join on more than one interface,
	 * or it would receive multiple copies of each packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until the routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to run multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 * Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}

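/* Illustrative example of the TTL scoping above: for an entry with
 * ttls[] = { 1, 255, 2 }, a packet arriving with TTL 3 is cloned to
 * vif 0 (3 > 1) and vif 2 (3 > 2); vif 1's threshold of 255 marks a vif
 * that is not in the entry's oif set at all. The final qualifying vif
 * is sent the original skb rather than a clone, unless a copy must be
 * preserved for local delivery.
 */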
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = rt->rt_oif,
		.flowi4_iif = rt->rt_iif,
		.flowi4_mark = rt->rt_mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/*
 * Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forwarding; it should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option in IGMP packets destined to routable
			 * groups. That is very bad, because it means
			 * we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 * No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif

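/* Register packet layout assumed above (sketch):
 *
 *	[ outer IP header, proto = IPPROTO_PIM       ]
 *	[ PIM header (type REGISTER)                 ]
 *	[ encapsulated IP multicast packet           ] <- encap points here
 *
 * Everything before the encapsulated packet is pulled off before the skb
 * is re-injected on the register vif's device via netif_rx().
 */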
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

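/* Resulting netlink attribute layout (sketch):
 *
 *	RTA_IIF: ifindex of the parent (input) vif
 *	RTA_MULTIPATH:
 *		rtnexthop { rtnh_ifindex = oif, rtnh_hops = TTL threshold }
 *		... one per forwarding vif of the (S,G) entry ...
 *
 * i.e. each output vif becomes one nexthop, with its TTL threshold
 * carried in rtnh_hops.
 */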
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 pid, u32 seq, struct mfc_cache *c)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len = 32;
	rtm->rtm_src_len = 32;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;
	rtm->rtm_flags = 0;

	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}

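/* Dump resumption note: when the skb fills up, the dump ends early and
 * the position is saved as the triple (table, hash line, entry) in
 * cb->args[0..2], so the next invocation skips straight back to where
 * the previous one stopped.
 */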
#ifdef CONFIG_PROC_FS
/*
 * The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

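/* Illustrative /proc/net/ip_mr_vif output (made-up counters; Local and
 * Remote are the raw hex of the vif addresses):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    654321     987 00000 0100000A 00000000
 */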
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

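/* Illustrative /proc/net/ip_mr_cache line (made-up counters; Group and
 * Origin are the raw hex of the addresses):
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	010000EF 0100000A 0             42     6300        0  1:1  2:1
 *
 * i.e. one resolved (S,G) entry arriving on vif 0, forwarded on vifs 1
 * and 2 with TTL threshold 1 each.
 */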
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif


/*
 * Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}