/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

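/*
 * fib_default_rule_add - install a default rule for @ops
 *
 * Allocates a rule whose action is to look up @table and appends it to
 * the ops' rule list with the given preference and flags.  Intended for
 * initial rules that are added before the list becomes reachable, hence
 * the unlocked list_add_tail() below.
 */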
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

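/*
 * fib_default_rule_pref - pick a preference for a new rule
 *
 * Returns one less than the preference of the second rule in the list
 * if that rule has a non-zero preference, otherwise 0.
 */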
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

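/*
 * lookup_rules_ops - find the fib_rules_ops registered for @family
 *
 * Walks the per-namespace ops list under RCU and takes a reference on
 * the owning module.  The caller must drop it with rules_ops_put().
 * Returns NULL if no ops are registered or the module is going away.
 */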
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

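/*
 * __fib_rules_register - add @ops to the namespace's list of families
 *
 * Validates that the mandatory callbacks are present and that the rule
 * size can hold a struct fib_rule, then links the ops under
 * rules_mod_lock.  Fails with -EEXIST if the family is already
 * registered in this namespace.
 */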
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

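/*
 * fib_rules_register - register a rules family from a template
 *
 * Duplicates @tmpl, ties the copy to @net and registers it.  Returns
 * the new ops on success or an ERR_PTR() on failure; callers undo this
 * with fib_rules_unregister().
 */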
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

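/*
 * fib_rules_unregister - remove @ops and drop all of its rules
 *
 * Unlinks the ops from the namespace list, releases every rule, and
 * frees the ops itself after an RCU grace period so that concurrent
 * lookups walking the list under RCU stay safe.
 */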
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

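/*
 * fib_rule_match - check whether @rule selects the flow @fl
 *
 * The generic input/output interface and fwmark selectors are checked
 * first, then the family specific ->match() callback.  The result is
 * inverted if the rule carries FIB_RULE_INVERT.
 */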
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

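/*
 * fib_rules_lookup - find the rule that resolves @fl
 *
 * Walks the ordered rule list under RCU, following resolved goto
 * targets, and invokes ->action() on every matching rule until one
 * returns something other than -EAGAIN.  On success the matched rule
 * is stored in @arg (with a reference taken unless FIB_LOOKUP_NOREF is
 * set); -ESRCH is returned if no rule gave a final verdict.
 */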
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

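/*
 * validate_rulemsg - sanity check the address selectors of a request
 *
 * If a source or destination prefix length is given, the corresponding
 * FRA_SRC/FRA_DST attribute must be present, of exactly the family's
 * address size, and the prefix length must fit the address width.
 */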
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

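/*
 * fib_nl_newrule - RTM_NEWRULE handler
 *
 * Parses and validates the netlink request, allocates a rule, fills the
 * generic selectors (priority, iif/oif, fwmark/mask, goto target) and
 * hands the family specific attributes to ->configure().  The rule is
 * then inserted in preference order, pending goto references to it are
 * resolved, and an RTM_NEWRULE notification is sent.
 */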
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

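/*
 * fib_nl_delrule - RTM_DELRULE handler
 *
 * Finds the first rule matching every selector supplied in the request,
 * unlinks it, invalidates any goto rules that pointed at it, sends an
 * RTM_DELRULE notification and drops the list's reference to the rule.
 */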
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if the deleted rule was the target of any goto
		 * rule and, if so, mark those rules as unresolved.  As
		 * this operation is potentially very expensive, it is
		 * only performed if goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

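/*
 * fib_rule_nlmsg_size - estimate the message size needed for @rule
 *
 * Accounts for the generic header and attributes; families add their
 * own attribute sizes through the optional ->nlmsg_payload() callback.
 */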
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

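/*
 * fib_nl_fill_rule - encode @rule into a netlink message
 *
 * Emits the fib_rule_hdr plus the generic attributes that are set on
 * the rule, then lets the family append its own via ->fill().  Returns
 * a negative value and cancels the message if the skb runs out of room.
 */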
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)))
		goto nla_put_failure;
	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

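/*
 * dump_rules - dump all rules of one family into a netlink skb
 *
 * Resumes from the per-family index stored in cb->args[1] and records
 * the new position there so partial dumps can continue later.
 */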
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

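/*
 * fib_nl_dumprule - RTM_GETRULE dump handler
 *
 * Dumps the rules of a single family when one is given in the request,
 * otherwise iterates over all registered families, using cb->args[0]
 * as the family index and cb->args[1] as the per-family rule index.
 */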
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

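/*
 * notify_rule_change - broadcast a rule addition or removal
 *
 * Builds an RTM_NEWRULE/RTM_DELRULE message and sends it to the
 * family's netlink notification group; on allocation or fill errors
 * the error is reported to listeners via rtnl_set_sk_err().
 */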
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

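/*
 * attach_rules/detach_rules - keep iif/oif references in sync with
 * network devices as they are registered and unregistered.  Rules
 * bound to a device name hold its ifindex, or -1 while the device
 * is absent.
 */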
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

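/*
 * fib_rules_event - netdevice notifier callback
 *
 * Re-resolves or invalidates device-bound rules of every family in the
 * device's namespace as interfaces come and go.  Runs under the RTNL.
 */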
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

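/*
 * fib_rules_init - subsystem initialization
 *
 * Registers the RTM_NEWRULE/RTM_DELRULE/RTM_GETRULE rtnetlink handlers,
 * the per-namespace state and the netdevice notifier, unwinding the
 * already registered pieces on failure.
 */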
static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);