/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
        KUIDT_INIT(0),
        KUIDT_INIT(~0),
};

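/*
 * fib_rule_matchall - true if the rule carries no selector at all, i.e.
 * every match field (interfaces, mark, tunnel id, flags, suppressors,
 * uid range, port ranges) is still at its unset default and the rule
 * therefore matches any packet.
 */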
bool fib_rule_matchall(const struct fib_rule *rule)
{
        if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
            rule->flags)
                return false;
        if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
                return false;
        if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
            !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
                return false;
        if (fib_rule_port_range_set(&rule->sport_range))
                return false;
        if (fib_rule_port_range_set(&rule->dport_range))
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
{
        struct fib_rule *r;

        r = kzalloc(ops->rule_size, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        refcount_set(&r->refcnt, 1);
        r->action = FR_ACT_TO_TBL;
        r->pref = pref;
        r->table = table;
        r->flags = flags;
        r->proto = RTPROT_KERNEL;
        r->fr_net = ops->fro_net;
        r->uid_range = fib_kuid_range_unset;

        r->suppress_prefixlen = -1;
        r->suppress_ifgroup = -1;

        /* The lock is not required here, the list is unreachable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
        return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

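/*
 * Default preference for a new rule when FRA_PRIORITY is not given:
 * one below the preference of the second rule in the list (the slot
 * just ahead of it), or 0 if no such slot can be derived.
 */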
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
        struct list_head *pos;
        struct fib_rule *rule;

        if (!list_empty(&ops->rules_list)) {
                pos = ops->rules_list.next;
                if (pos->next != &ops->rules_list) {
                        rule = list_entry(pos->next, struct fib_rule, list);
                        if (rule->pref)
                                return rule->pref - 1;
                }
        }

        return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

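/*
 * Look up the per-family fib_rules_ops of @net under RCU, taking a
 * reference on the owning module. Returns NULL if the family is not
 * registered or its module is unloading; drop the reference with
 * rules_ops_put().
 */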
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
        if (ops->flush_cache)
                ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;
        struct net *net;

        net = ops->fro_net;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&net->rules_mod_lock);
        list_for_each_entry(o, &net->rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
errout:
        spin_unlock(&net->rules_mod_lock);

        return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
        struct fib_rules_ops *ops;
        int err;

        ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ops->rules_list);
        ops->fro_net = net;

        err = __fib_rules_register(ops);
        if (err) {
                kfree(ops);
                ops = ERR_PTR(err);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

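/*
 * Unregistration path: every rule is unlinked (giving the family a
 * chance to clean up via ops->delete) and released before the ops
 * structure itself is freed after an RCU grace period.
 */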
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
                list_del_rcu(&rule->list);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
        }
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
        struct net *net = ops->fro_net;

        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
        spin_unlock(&net->rules_mod_lock);

        fib_rules_cleanup_ops(ops);
        kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

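/*
 * Netlink attribute helpers for FRA_UID_RANGE and FRA_{S,D}PORT_RANGE:
 * translate between wire format and kernel representation, converting
 * uids relative to the current user namespace.
 */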
static int uid_range_set(struct fib_kuid_range *range)
{
        return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
        struct fib_rule_uid_range *in;
        struct fib_kuid_range out;

        in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

        out.start = make_kuid(current_user_ns(), in->start);
        out.end = make_kuid(current_user_ns(), in->end);

        return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
        struct fib_rule_uid_range out = {
                from_kuid_munged(current_user_ns(), range->start),
                from_kuid_munged(current_user_ns(), range->end)
        };

        return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int nla_get_port_range(struct nlattr *pattr,
                              struct fib_rule_port_range *port_range)
{
        const struct fib_rule_port_range *pr = nla_data(pattr);

        if (!fib_rule_port_range_valid(pr))
                return -EINVAL;

        port_range->start = pr->start;
        port_range->end = pr->end;

        return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
                              struct fib_rule_port_range *range)
{
        return nla_put(skb, attrtype, sizeof(*range), range);
}

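/*
 * Match one rule against a flow. Unset (zero) selectors never cause a
 * mismatch; FIB_RULE_INVERT negates the overall result, including the
 * verdict of the family-specific ops->match().
 */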
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags,
                          struct fib_lookup_arg *arg)
{
        int ret = 0;

        if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
                goto out;

        if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
                goto out;

        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;

        if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
                goto out;

        if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
                goto out;

        if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
            uid_gt(fl->flowi_uid, rule->uid_range.end))
                goto out;

        ret = ops->match(rule, fl, flags);
out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

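/*
 * Core lookup: walk the rules in preference order under RCU, following
 * resolved goto targets, until an action returns something other than
 * -EAGAIN. A suppressor may veto the result and resume the walk. Unless
 * FIB_LOOKUP_NOREF is set, a reference is taken on the rule returned in
 * arg->rule.
 */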
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
                if (!fib_rule_match(rule, ops, fl, flags, arg))
                        continue;

                if (rule->action == FR_ACT_GOTO) {
                        struct fib_rule *target;

                        target = rcu_dereference(rule->ctarget);
                        if (target == NULL) {
                                continue;
                        } else {
                                rule = target;
                                goto jumped;
                        }
                } else if (rule->action == FR_ACT_NOP)
                        continue;
                else
                        err = ops->action(rule, fl, flags, arg);

                if (!err && ops->suppress && ops->suppress(rule, arg))
                        continue;

                if (err != -EAGAIN) {
                        if ((arg->flags & FIB_LOOKUP_NOREF) ||
                            likely(refcount_inc_not_zero(&rule->refcnt))) {
                                arg->rule = rule;
                                goto out;
                        }
                        break;
                }
        }

        err = -ESRCH;
out:
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

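/*
 * FIB notifier plumbing: replay (dump) or broadcast RULE_ADD/RULE_DEL
 * events to in-kernel fib notifier listeners, with ops->fib_rules_seq
 * serving as a change counter that listeners can use to detect missed
 * updates.
 */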
static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
                                  enum fib_event_type event_type,
                                  struct fib_rule *rule, int family)
{
        struct fib_rule_notifier_info info = {
                .info.family = family,
                .rule = rule,
        };

        return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
                                   enum fib_event_type event_type,
                                   struct fib_rule *rule,
                                   struct fib_rules_ops *ops,
                                   struct netlink_ext_ack *extack)
{
        struct fib_rule_notifier_info info = {
                .info.family = ops->family,
                .info.extack = extack,
                .rule = rule,
        };

        ops->fib_rules_seq++;
        return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family)
{
        struct fib_rules_ops *ops;
        struct fib_rule *rule;

        ops = lookup_rules_ops(net, family);
        if (!ops)
                return -EAFNOSUPPORT;
        list_for_each_entry_rcu(rule, &ops->rules_list, list)
                call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule,
                                       family);
        rules_ops_put(ops);

        return 0;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

unsigned int fib_rules_seq_read(struct net *net, int family)
{
        unsigned int fib_rules_seq;
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        ops = lookup_rules_ops(net, family);
        if (!ops)
                return 0;
        fib_rules_seq = ops->fib_rules_seq;
        rules_ops_put(ops);

        return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
                            struct fib_rules_ops *ops)
{
        int err = -EINVAL;

        if (frh->src_len)
                if (tb[FRA_SRC] == NULL ||
                    frh->src_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_SRC]) != ops->addr_size)
                        goto errout;

        if (frh->dst_len)
                if (tb[FRA_DST] == NULL ||
                    frh->dst_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_DST]) != ops->addr_size)
                        goto errout;

        err = 0;
errout:
        return err;
}

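/*
 * NLM_F_EXCL support: returns 1 if a rule matching every generic
 * selector of @rule (and the family-specific ones per ops->compare())
 * already exists.
 */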
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
                       struct nlattr **tb, struct fib_rule *rule)
{
        struct fib_rule *r;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->action != rule->action)
                        continue;

                if (r->table != rule->table)
                        continue;

                if (r->pref != rule->pref)
                        continue;

                if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
                        continue;

                if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
                        continue;

                if (r->mark != rule->mark)
                        continue;

                if (r->mark_mask != rule->mark_mask)
                        continue;

                if (r->tun_id != rule->tun_id)
                        continue;

                if (r->fr_net != rule->fr_net)
                        continue;

                if (r->l3mdev != rule->l3mdev)
                        continue;

                if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
                    !uid_eq(r->uid_range.end, rule->uid_range.end))
                        continue;

                if (r->ip_proto != rule->ip_proto)
                        continue;

                if (!fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
                        continue;

                if (!fib_rule_port_range_compare(&r->dport_range,
                                                 &rule->dport_range))
                        continue;

                if (!ops->compare(r, frh, tb))
                        continue;
                return 1;
        }
        return 0;
}

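/*
 * RTM_NEWRULE handler: parse and validate the request, build the rule,
 * resolve goto targets in both directions, insert it in preference
 * order and notify both fib listeners and rtnetlink.
 */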
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL, unresolved = 0;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }
        refcount_set(&rule->refcnt, 1);
        rule->fr_net = net;

        rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
                                      : fib_default_rule_pref(ops);

        rule->proto = tb[FRA_PROTOCOL] ?
                nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;

        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;

                rule->iifindex = -1;
                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->iifname);
                if (dev)
                        rule->iifindex = dev->ifindex;
        }

        if (tb[FRA_OIFNAME]) {
                struct net_device *dev;

                rule->oifindex = -1;
                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->oifname);
                if (dev)
                        rule->oifindex = dev->ifindex;
        }

        if (tb[FRA_FWMARK]) {
                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
                if (rule->mark)
                        /* compatibility: if the mark value is non-zero all bits
                         * are compared unless a mask is explicitly specified.
                         */
                        rule->mark_mask = 0xFFFFFFFF;
        }

        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

        if (tb[FRA_TUN_ID])
                rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

        err = -EINVAL;
        if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
                rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
                if (rule->l3mdev != 1)
#endif
                        goto errout_free;
        }

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);
        if (tb[FRA_SUPPRESS_PREFIXLEN])
                rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
        else
                rule->suppress_prefixlen = -1;

        if (tb[FRA_SUPPRESS_IFGROUP])
                rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
        else
                rule->suppress_ifgroup = -1;

        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
                        goto errout_free;

                rule->target = nla_get_u32(tb[FRA_GOTO]);
                /* Backward jumps are prohibited to avoid endless loops */
                if (rule->target <= rule->pref)
                        goto errout_free;

                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
                                RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }

                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;

        if (rule->l3mdev && rule->table)
                goto errout_free;

        if (tb[FRA_UID_RANGE]) {
                if (current_user_ns() != net->user_ns) {
                        err = -EPERM;
                        goto errout_free;
                }

                rule->uid_range = nla_get_kuid_range(tb);

                if (!uid_range_set(&rule->uid_range) ||
                    !uid_lte(rule->uid_range.start, rule->uid_range.end))
                        goto errout_free;
        } else {
                rule->uid_range = fib_kuid_range_unset;
        }

        if (tb[FRA_IP_PROTO])
                rule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

        if (tb[FRA_SPORT_RANGE]) {
                err = nla_get_port_range(tb[FRA_SPORT_RANGE],
                                         &rule->sport_range);
                if (err)
                        goto errout_free;
        }

        if (tb[FRA_DPORT_RANGE]) {
                err = nla_get_port_range(tb[FRA_DPORT_RANGE],
                                         &rule->dport_range);
                if (err)
                        goto errout_free;
        }

        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
            rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }

        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;

        err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
                                      extack);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, &ops->rules_list);

        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
                 * any of them are pointing to this new rule.
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref &&
                            rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
                        }
                }
        }

        if (rule->action == FR_ACT_GOTO)
                ops->nr_goto_rules++;

        if (unresolved)
                ops->unresolved_rules++;

        if (rule->tun_id)
                ip_tunnel_need_metadata();

        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;

errout_free:
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

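/*
 * RTM_DELRULE handler: delete the first rule matching every attribute
 * present in the request, fixing up any goto rules that pointed at it
 * before notifying listeners.
 */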
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rule_port_range sprange = {0, 0};
        struct fib_rule_port_range dprange = {0, 0};
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r;
        struct nlattr *tb[FRA_MAX+1];
        struct fib_kuid_range range;
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        if (tb[FRA_UID_RANGE]) {
                range = nla_get_kuid_range(tb);
                if (!uid_range_set(&range)) {
                        err = -EINVAL;
                        goto errout;
                }
        } else {
                range = fib_kuid_range_unset;
        }

        if (tb[FRA_SPORT_RANGE]) {
                err = nla_get_port_range(tb[FRA_SPORT_RANGE],
                                         &sprange);
                if (err)
                        goto errout;
        }

        if (tb[FRA_DPORT_RANGE]) {
                err = nla_get_port_range(tb[FRA_DPORT_RANGE],
                                         &dprange);
                if (err)
                        goto errout;
        }

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (tb[FRA_PROTOCOL] &&
                    (rule->proto != nla_get_u8(tb[FRA_PROTOCOL])))
                        continue;

                if (frh->action && (frh->action != rule->action))
                        continue;

                if (frh_get_table(frh, tb) &&
                    (frh_get_table(frh, tb) != rule->table))
                        continue;

                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;

                if (tb[FRA_IIFNAME] &&
                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
                        continue;

                if (tb[FRA_OIFNAME] &&
                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
                        continue;

                if (tb[FRA_FWMARK] &&
                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
                        continue;

                if (tb[FRA_FWMASK] &&
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;

                if (tb[FRA_TUN_ID] &&
                    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
                        continue;

                if (tb[FRA_L3MDEV] &&
                    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
                        continue;

                if (uid_range_set(&range) &&
                    (!uid_eq(rule->uid_range.start, range.start) ||
                     !uid_eq(rule->uid_range.end, range.end)))
                        continue;

                if (tb[FRA_IP_PROTO] &&
                    (rule->ip_proto != nla_get_u8(tb[FRA_IP_PROTO])))
                        continue;

                if (fib_rule_port_range_set(&sprange) &&
                    !fib_rule_port_range_compare(&rule->sport_range, &sprange))
                        continue;

                if (fib_rule_port_range_set(&dprange) &&
                    !fib_rule_port_range_compare(&rule->dport_range, &dprange))
                        continue;

                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                if (ops->delete) {
                        err = ops->delete(rule);
                        if (err)
                                goto errout;
                }

                if (rule->tun_id)
                        ip_tunnel_unneed_metadata();

                list_del_rcu(&rule->list);

                if (rule->action == FR_ACT_GOTO) {
                        ops->nr_goto_rules--;
                        if (rtnl_dereference(rule->ctarget) == NULL)
                                ops->unresolved_rules--;
                }

                /*
                 * Check if this rule is the target of any goto rule. If
                 * so, retarget them to the next rule with the same
                 * preference, or mark them unresolved. As this operation
                 * is potentially very expensive, it is only performed if
                 * goto rules (other than the one being deleted) have
                 * actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
                        struct fib_rule *n;

                        n = list_next_entry(rule, list);
                        if (&n->list == &ops->rules_list || n->pref != rule->pref)
                                n = NULL;
                        list_for_each_entry(r, &ops->rules_list, list) {
                                if (rtnl_dereference(r->ctarget) != rule)
                                        continue;
                                rcu_assign_pointer(r->ctarget, n);
                                if (!n)
                                        ops->unresolved_rules++;
                        }
                }

                call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
                                        NULL);
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).portid);
                fib_rule_put(rule);
                flush_route_cache(ops);
                rules_ops_put(ops);
                return 0;
        }

        err = -ENOENT;
errout:
        rules_ops_put(ops);
        return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

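/*
 * Upper bound on the notification size for one rule; families account
 * for their own attributes via ops->nlmsg_payload().
 */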
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                                         struct fib_rule *rule)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
                         + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
                         + nla_total_size_64bit(8) /* FRA_TUN_ID */
                         + nla_total_size(sizeof(struct fib_kuid_range))
                         + nla_total_size(1) /* FRA_PROTOCOL */
                         + nla_total_size(1) /* FRA_IP_PROTO */
                         + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
                         + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */

        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);

        return payload;
}

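/*
 * Fill one RTM_{NEW,DEL}RULE message. Attributes still at their unset
 * defaults are omitted; unresolved gotos and detached interfaces are
 * reported through frh->flags.
 */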
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
        if (nla_put_u32(skb, FRA_TABLE, rule->table))
                goto nla_put_failure;
        if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
                goto nla_put_failure;
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;

        if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
                goto nla_put_failure;

        if (rule->action == FR_ACT_GOTO &&
            rcu_access_pointer(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;

        if (rule->iifname[0]) {
                if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
                        goto nla_put_failure;
                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }

        if (rule->oifname[0]) {
                if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
                        goto nla_put_failure;
                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }

        if ((rule->pref &&
             nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
            (rule->mark &&
             nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (rule->tun_id &&
             nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
            (rule->l3mdev &&
             nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
            (uid_range_set(&rule->uid_range) &&
             nla_put_uid_range(skb, &rule->uid_range)) ||
            (fib_rule_port_range_set(&rule->sport_range) &&
             nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
            (fib_rule_port_range_set(&rule->dport_range) &&
             nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
            (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
                goto nla_put_failure;

        if (rule->suppress_ifgroup != -1) {
                if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
                        goto nla_put_failure;
        }

        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                      struct fib_rules_ops *ops)
{
        int idx = 0;
        struct fib_rule *rule;
        int err = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;

                err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                       NLM_F_MULTI, ops);
                if (err)
                        break;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[1] = idx;
        rules_ops_put(ops);

        return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;

        family = rtnl_msg_family(cb->nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
                if (ops == NULL)
                        return -EAFNOSUPPORT;

                dump_rules(skb, cb, ops);

                return skb->len;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (idx < cb->args[0] || !try_module_get(ops->owner))
                        goto skip;

                if (dump_rules(skb, cb, ops) < 0)
                        break;

                cb->args[1] = 0;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;

        return skb->len;
}

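/*
 * Multicast an RTM_NEWRULE/RTM_DELRULE notification to ops->nlgroup;
 * on allocation or fill failure the error is recorded against the
 * rtnetlink socket instead.
 */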
static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, ops->nlgroup, err);
}

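/*
 * Rules reference devices by name, so the cached ifindex must track
 * device lifetime: it is (re)bound on register/rename and reset to -1
 * (detached) on unregister, driven by the notifier below.
 */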
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == -1 &&
                    strcmp(dev->name, rule->iifname) == 0)
                        rule->iifindex = dev->ifindex;
                if (rule->oifindex == -1 &&
                    strcmp(dev->name, rule->oifname) == 0)
                        rule->oifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == dev->ifindex)
                        rule->iifindex = -1;
                if (rule->oifindex == dev->ifindex)
                        rule->oifindex = -1;
        }
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        attach_rules(&ops->rules_list, dev);
                break;

        case NETDEV_CHANGENAME:
                list_for_each_entry(ops, &net->rules_ops, list) {
                        detach_rules(&ops->rules_list, dev);
                        attach_rules(&ops->rules_list, dev);
                }
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        detach_rules(&ops->rules_list, dev);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
        INIT_LIST_HEAD(&net->rules_ops);
        spin_lock_init(&net->rules_mod_lock);
        return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
        WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
        .init = fib_rules_net_init,
        .exit = fib_rules_net_exit,
};

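/*
 * Module init: register the PF_UNSPEC rtnetlink handlers, the per-netns
 * state and the netdevice notifier, unwinding in reverse order on
 * failure.
 */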
static int __init fib_rules_init(void)
{
        int err;

        rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);

        err = register_pernet_subsys(&fib_rules_net_ops);
        if (err < 0)
                goto fail;

        err = register_netdevice_notifier(&fib_rules_notifier);
        if (err < 0)
                goto fail_unregister;

        return 0;

fail_unregister:
        unregister_pernet_subsys(&fib_rules_net_ops);
fail:
        rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
        rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
        rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
        return err;
}

subsys_initcall(fib_rules_init);