// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <linux/indirect_call_wrapper.h>

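/* Dispatch ->match(), ->action() and ->suppress() through the indirect
 * call wrappers: when the IPv4 and/or IPv6 multiple-tables options are
 * built in, the known fib4/fib6 callbacks can be called directly,
 * avoiding indirect-call (retpoline) overhead on the lookup fast path.
 */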
#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define INDIRECT_CALL_MT(f, f2, f1, ...) \
	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
#elif defined(CONFIG_IP_MULTIPLE_TABLES)
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
#endif

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

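/* Return true if the rule has no selectors at all, i.e. it matches every
 * packet: no interfaces, mark, tunnel id, flags, suppressors, uid range
 * or port ranges are configured.
 */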
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	if (fib_rule_port_range_set(&rule->sport_range))
		return false;
	if (fib_rule_port_range_set(&rule->dport_range))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

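/* Allocate and append a kernel-owned default rule (e.g. the local/main/
 * default rules installed at boot) pointing at @table.
 */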
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int nla_get_port_range(struct nlattr *pattr,
			      struct fib_rule_port_range *port_range)
{
	const struct fib_rule_port_range *pr = nla_data(pattr);

	if (!fib_rule_port_range_valid(pr))
		return -EINVAL;

	port_range->start = pr->start;
	port_range->end = pr->end;

	return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
			      struct fib_rule_port_range *range)
{
	return nla_put(skb, attrtype, sizeof(*range), range);
}

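/* Evaluate the generic selectors (interfaces, mark, tunnel id, l3mdev,
 * uid range) and then the family specific ->match() callback. A set
 * FIB_RULE_INVERT flag negates the final result.
 */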
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = INDIRECT_CALL_MT(ops->match,
			       fib6_rule_match,
			       fib4_rule_match,
			       rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

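/* Walk the rules in ascending preference order under RCU. GOTO rules
 * jump to their resolved target, NOP rules are skipped, and an action
 * returning -EAGAIN falls through to the next rule. On a final result
 * the matched rule is returned in arg->rule, with a reference taken
 * unless the caller passed FIB_LOOKUP_NOREF.
 */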
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = INDIRECT_CALL_MT(ops->action,
					       fib6_rule_action,
					       fib4_rule_action,
					       rule, fl, flags, arg);

		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
							      fib6_rule_suppress,
							      fib4_rule_suppress,
							      rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int call_fib_rule_notifier(struct notifier_block *nb,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.info.extack = extack,
		.rule = rule,
	};

	return call_fib_notifier(nb, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ops->fib_rules_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
		   struct netlink_ext_ack *extack)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;
	int err = 0;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
					     rule, family, extack);
		if (err)
			break;
	}
	rules_ops_put(ops);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

unsigned int fib_rules_seq_read(struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	fib_rules_seq = ops->fib_rules_seq;
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

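/* Find an existing rule matching the attributes the user actually
 * supplied in @rule; attributes left unset in the request do not
 * constrain the search. Used by RTM_DELRULE to locate its target.
 */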
static struct fib_rule *rule_find(struct fib_rules_ops *ops,
				  struct fib_rule_hdr *frh,
				  struct nlattr **tb,
				  struct fib_rule *rule,
				  bool user_priority)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (rule->action && r->action != rule->action)
			continue;

		if (rule->table && r->table != rule->table)
			continue;

		if (user_priority && r->pref != rule->pref)
			continue;

		if (rule->iifname[0] &&
		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (rule->oifname[0] &&
		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (rule->mark && r->mark != rule->mark)
			continue;

		if (rule->suppress_ifgroup != -1 &&
		    r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (rule->suppress_prefixlen != -1 &&
		    r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
			continue;

		if (rule->tun_id && r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
			continue;

		if (uid_range_set(&rule->uid_range) &&
		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		     !uid_eq(r->uid_range.end, rule->uid_range.end)))
			continue;

		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
			continue;

		if (rule->proto && r->proto != rule->proto)
			continue;

		if (fib_rule_port_range_set(&rule->sport_range) &&
		    !fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (fib_rule_port_range_set(&rule->dport_range) &&
		    !fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return r;
	}

	return NULL;
}

#ifdef CONFIG_NET_L3_MASTER_DEV
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif

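/* Validate the netlink attributes and translate them into a freshly
 * allocated struct fib_rule. On success the caller owns *rule and
 * learns via *user_priority whether FRA_PRIORITY was given explicitly.
 */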
static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack,
		       struct fib_rules_ops *ops,
		       struct nlattr *tb[],
		       struct fib_rule **rule,
		       bool *user_priority)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rule *nlrule = NULL;
	int err = -EINVAL;

	if (frh->src_len)
		if (!tb[FRA_SRC] ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			goto errout;
		}

	if (frh->dst_len)
		if (!tb[FRA_DST] ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid dst address");
			goto errout;
		}

	nlrule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (!nlrule) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&nlrule->refcnt, 1);
	nlrule->fr_net = net;

	if (tb[FRA_PRIORITY]) {
		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
		*user_priority = true;
	} else {
		nlrule->pref = fib_default_rule_pref(ops);
	}

	nlrule->proto = tb[FRA_PROTOCOL] ?
		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		nlrule->iifindex = -1;
		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->iifname);
		if (dev)
			nlrule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		nlrule->oifindex = -1;
		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->oifname);
		if (dev)
			nlrule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (nlrule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			nlrule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	err = -EINVAL;
	if (tb[FRA_L3MDEV] &&
	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
		goto errout_free;

	nlrule->action = frh->action;
	nlrule->flags = frh->flags;
	nlrule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		nlrule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		nlrule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (nlrule->action != FR_ACT_GOTO) {
			NL_SET_ERR_MSG(extack, "Unexpected goto");
			goto errout_free;
		}

		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (nlrule->target <= nlrule->pref) {
			NL_SET_ERR_MSG(extack, "Backward goto not supported");
			goto errout_free;
		}
	} else if (nlrule->action == FR_ACT_GOTO) {
		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
		goto errout_free;
	}

	if (nlrule->l3mdev && nlrule->table) {
		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
		goto errout_free;
	}

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			NL_SET_ERR_MSG(extack, "No permission to set uid");
			goto errout_free;
		}

		nlrule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&nlrule->uid_range) ||
		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
			NL_SET_ERR_MSG(extack, "Invalid uid range");
			goto errout_free;
		}
	} else {
		nlrule->uid_range = fib_kuid_range_unset;
	}

	if (tb[FRA_IP_PROTO])
		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

	if (tb[FRA_SPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
					 &nlrule->sport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid sport range");
			goto errout_free;
		}
	}

	if (tb[FRA_DPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
					 &nlrule->dport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid dport range");
			goto errout_free;
		}
	}

	*rule = nlrule;

	return 0;

errout_free:
	kfree(nlrule);
errout:
	return err;
}

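/* Unlike rule_find(), compare every field so that NLM_F_EXCL only
 * rejects an exact duplicate of an existing rule.
 */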
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (r->ip_proto != rule->ip_proto)
			continue;

		if (r->proto != rule->proto)
			continue;

		if (!fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (!fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}

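/* RTM_NEWRULE handler: parse the request into a rule, honour NLM_F_EXCL,
 * run the family ->configure() callback and the fib notifier chain, then
 * splice the rule into the list ordered by preference and resolve any
 * pending goto targets.
 */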
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	int err = -EINVAL, unresolved = 0;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (!ops) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     ops->policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
	if (err)
		goto errout;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb, extack);
	if (err < 0)
		goto errout_free;

	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
				      extack);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref == rule->target) {
			RCU_INIT_POINTER(rule->ctarget, r);
			break;
		}
	}

	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
		unresolved = 1;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

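/* RTM_DELRULE handler: build a template rule from the request, locate
 * the matching entry with rule_find(), unlink it and repoint or
 * invalidate any goto rules that referenced it.
 */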
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *nlrule = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     ops->policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
	if (err)
		goto errout;

	rule = rule_find(ops, frh, tb, nlrule, user_priority);
	if (!rule) {
		err = -ENOENT;
		goto errout;
	}

	if (rule->flags & FIB_RULE_PERMANENT) {
		err = -EPERM;
		goto errout;
	}

	if (ops->delete) {
		err = ops->delete(rule);
		if (err)
			goto errout;
	}

	if (rule->tun_id)
		ip_tunnel_unneed_metadata();

	list_del_rcu(&rule->list);

	if (rule->action == FR_ACT_GOTO) {
		ops->nr_goto_rules--;
		if (rtnl_dereference(rule->ctarget) == NULL)
			ops->unresolved_rules--;
	}

	/*
	 * Check if this rule is a target of any goto rule. If so, point
	 * those rules at the next rule with the same preference, or mark
	 * them unresolved. As this scan is potentially expensive, it is
	 * only performed when goto rules (other than the one being
	 * deleted) actually exist.
	 */
	if (ops->nr_goto_rules > 0) {
		struct fib_rule *n;

		n = list_next_entry(rule, list);
		if (&n->list == &ops->rules_list || n->pref != rule->pref)
			n = NULL;
		list_for_each_entry(r, &ops->rules_list, list) {
			if (rtnl_dereference(r->ctarget) != rule)
				continue;
			rcu_assign_pointer(r->ctarget, n);
			if (!n)
				ops->unresolved_rules++;
		}
	}

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
				NULL);
	notify_rule_change(RTM_DELRULE, rule, ops, nlh,
			   NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	kfree(nlrule);
	return 0;

errout:
	kfree(nlrule);
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range))
			 + nla_total_size(1) /* FRA_PROTOCOL */
			 + nla_total_size(1) /* FRA_IP_PROTO */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

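/* Serialize one rule into an RTM_NEWRULE/RTM_DELRULE message. Tables
 * above 255 do not fit in the 8-bit header field and are reported
 * there as RT_TABLE_COMPAT, with the real id in FRA_TABLE.
 */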
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
		goto nla_put_failure;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)) ||
	    (fib_rule_port_range_set(&rule->sport_range) &&
	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
	    (fib_rule_port_range_set(&rule->dport_range) &&
	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_hdr *frh;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
		return -EINVAL;
	}

	frh = nlmsg_data(nlh);
	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
	    frh->res1 || frh->res2 || frh->action || frh->flags) {
		NL_SET_ERR_MSG(extack,
			       "Invalid values in header for fib rule dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
		return -EINVAL;
	}

	return 0;
}

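/* RTM_GETRULE dump handler: dump one family if the request names it,
 * otherwise iterate over all registered families, using cb->args[0]/[1]
 * to resume across multiple dump callbacks.
 */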
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	if (cb->strict_check) {
		int err = fib_valid_dumprule_req(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = rtnl_msg_family(nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOMEM;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}


static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
	WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
	.exit = fib_rules_net_exit,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);