// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

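/* Per-cpu lists of uncached dsts (routes not attached to a fib6 node);
 * device teardown walks these lists to move their references off the
 * dying device (see rt6_uncached_list_flush_dev() below).
 */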
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}

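/* Confirm reachability of the route's nexthop neighbour, skipping
 * devices that do not do neighbour resolution and multicast daddrs.
 */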
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.default_advmss		= ip6_default_advmss,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.check			= ip6_dst_check,
	.destroy		= ip6_dst_destroy,
	.cow_metrics		= dst_cow_metrics_generic,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);

		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

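/* Pick the multipath leg for this flow: hash the flow (if not already
 * hashed for an ICMPv6 error) and walk the siblings until one whose
 * upper bound covers the hash value passes the score check.
 */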
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
		goto out;

	if (match->nh && have_oif_match && res->nh)
		return;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

struct fib6_nh_dm_arg {
	struct net		*net;
	const struct in6_addr	*saddr;
	int			oif;
	int			flags;
	struct fib6_nh		*nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
{
	struct fib6_nh_dm_arg arg = {
		.net = net,
		.saddr = saddr,
		.oif = oif,
		.flags = flags,
	};

	if (nexthop_is_blackhole(nh))
		return NULL;

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
		return arg.nh;

	return NULL;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	unsigned long last_probe;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock_bh();
	last_probe = READ_ONCE(fib6_nh->last_probe);
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, last_probe +
				       idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (!work || cmpxchg(&fib6_nh->last_probe,
			     last_probe, jiffies) != last_probe) {
		kfree(work);
	} else {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

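/* Score a nexthop: 2 for a matching (or unrestricted) outgoing
 * interface, router-preference bits above that, and a negative
 * RT6_NUD_* value when the reachability check fails.
 */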
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);

		if (n < 0)
			return n;
	}
	return m;
}

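/* Compare one nexthop against the current best (*mpri); on a better
 * score, record it and tell the caller whether round-robin is wanted.
 */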
static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

struct fib6_nh_frl_arg {
	u32		flags;
	int		oif;
	int		strict;
	int		*mpri;
	bool		*do_rr;
	struct fib6_nh	*nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_frl_arg *arg = _arg;

	arg->nh = nh;
	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags	= f6i->fib6_flags,
				.oif	= oif,
				.strict	= strict,
				.mpri	= mpri,
				.do_rr	= do_rr
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;
				res->f6i = f6i;
				res->nh = nexthop_fib6_nh(f6i->nh);
				return;
			}
			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg)) {
				matched = true;
				nh = arg.nh;
			}
		} else {
			nh = f6i->fib6_nh;
			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))
				matched = true;
		}
		if (matched) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}

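/* Select the route to use for a lookup from fn, round-robining among
 * equal-metric default routes when the scoring asked for it.
 */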
static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info))
		return -EINVAL;

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3)
		return -EINVAL;
	else if (rinfo->prefix_len > 128)
		return -EINVAL;
	else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2)
			return -EINVAL;
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1)
			return -EINVAL;
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * the device is enslaved, and the loopback device as the
		 * default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* the last case is netif_is_l3_master(dev) being true, in
		 * which case we want dev returned unchanged
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}

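/* Back out of a failed lookup: climb towards the tree root, descending
 * into source-routing subtrees on the way, until a node carrying route
 * info is found (or the top-level root is reached).
 */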
static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;

	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

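/* Take a reference on *prt if it is still live; otherwise substitute
 * the (held) null entry when a netns is given, so the caller always
 * ends up with a usable reference or NULL.
 */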
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	} else if (res.fib6_flags & RTF_REJECT) {
		goto do_create;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
do_create:
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	if (f6i->nh)
		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

	return pcpu_rt;
}

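/* pcpu routes cached from a nexthop object carry the namespace genid
 * at creation time; a genid bump invalidates them.
 */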
static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		prev = xchg(p, NULL);
		if (prev) {
			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
		}

		pcpu_rt = NULL;
	}

	return pcpu_rt;
}

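/* Allocate and install the pcpu copy for this CPU. The slot is known
 * to be empty (rt6_get_pcpu_route() just returned NULL), hence the
 * BUG_ON() on the cmpxchg() result.
 */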
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt)
		return NULL;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}

/* exception hash table implementation */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* completely purge the exception to allow releasing the held
	 * resources: some [sk] cache may keep the dst around for an
	 * unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

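/* Hash (dst, src) into a bucket index, keyed by a lazily initialized
 * random siphash key so bucket placement is not remotely predictable.
 */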
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static siphash_key_t rt6_exception_key __read_mostly;
	struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
	};
	u64 val;

	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		combined.src = *src;
#endif
	val = siphash(&combined, sizeof(combined), &rt6_exception_key);

	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

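/* Effective MTU of the lookup result: the route PMTU if set, else the
 * device MTU, clamped to IP6_MAX_MTU and minus any lwtunnel headroom.
 */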
static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}

#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (i.e., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}

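/* Insert a cached route (e.g. from a redirect or PMTU update) into the
 * nexthop's exception table, replacing any entry for the same
 * (daddr, saddr) pair and bumping the table sernum on success.
 */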
1651static int rt6_insert_exception(struct rt6_info *nrt,
1652 const struct fib6_result *res)
1653{
1654 struct net *net = dev_net(nrt->dst.dev);
1655 struct rt6_exception_bucket *bucket;
1656 struct fib6_info *f6i = res->f6i;
1657 struct in6_addr *src_key = NULL;
1658 struct rt6_exception *rt6_ex;
1659 struct fib6_nh *nh = res->nh;
1660 int max_depth;
1661 int err = 0;
1662
1663 spin_lock_bh(&rt6_exception_lock);
1664
1665 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1666 lockdep_is_held(&rt6_exception_lock));
1667 if (!bucket) {
1668 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1669 GFP_ATOMIC);
1670 if (!bucket) {
1671 err = -ENOMEM;
1672 goto out;
1673 }
1674 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1675 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1676 err = -EINVAL;
1677 goto out;
1678 }
1679
1680#ifdef CONFIG_IPV6_SUBTREES
1681 /* fib6_src.plen != 0 indicates f6i is in subtree
1682 * and exception table is indexed by a hash of
1683 * both fib6_dst and fib6_src.
1684 * Otherwise, the exception table is indexed by
1685 * a hash of only fib6_dst.
1686 */
1687 if (f6i->fib6_src.plen)
1688 src_key = &nrt->rt6i_src.addr;
1689#endif
1690 /* rt6_mtu_change() might lower mtu on f6i.
1691 * Only insert this exception route if its mtu
1692 * is less than f6i's mtu value.
1693 */
1694 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1695 err = -EINVAL;
1696 goto out;
1697 }
1698
1699 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1700 src_key);
1701 if (rt6_ex)
1702 rt6_remove_exception(bucket, rt6_ex);
1703
1704 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1705 if (!rt6_ex) {
1706 err = -ENOMEM;
1707 goto out;
1708 }
1709 rt6_ex->rt6i = nrt;
1710 rt6_ex->stamp = jiffies;
1711 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1712 bucket->depth++;
1713 net->ipv6.rt6_stats->fib_rt_cache++;
1714
1715 /* Randomize max depth to avoid some side channels attacks. */
1716 max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
1717 while (bucket->depth > max_depth)
1718 rt6_exception_remove_oldest(bucket);
1719
1720out:
1721 spin_unlock_bh(&rt6_exception_lock);
1722
1723 /* Update fn->fn_sernum to invalidate all cached dst */
1724 if (!err) {
1725 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1726 fib6_update_sernum(net, f6i);
1727 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1728 fib6_force_start_gc(net);
1729 }
1730
1731 return err;
1732}
1733
1734static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1735{
1736 struct rt6_exception_bucket *bucket;
1737 struct rt6_exception *rt6_ex;
1738 struct hlist_node *tmp;
1739 int i;
1740
1741 spin_lock_bh(&rt6_exception_lock);
1742
1743 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1744 if (!bucket)
1745 goto out;
1746
1747 /* Prevent rt6_insert_exception() to recreate the bucket list */
1748 if (!from)
1749 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1750
1751 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1752 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1753 if (!from ||
1754 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1755 rt6_remove_exception(bucket, rt6_ex);
1756 }
1757 WARN_ON_ONCE(!from && bucket->depth);
1758 bucket++;
1759 }
1760out:
1761 spin_unlock_bh(&rt6_exception_lock);
1762}
1763
1764static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1765{
1766 struct fib6_info *f6i = arg;
1767
1768 fib6_nh_flush_exceptions(nh, f6i);
1769
1770 return 0;
1771}
1772
1773void rt6_flush_exceptions(struct fib6_info *f6i)
1774{
1775 if (f6i->nh)
1776 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1777 f6i);
1778 else
1779 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1780}
1781
1782/* Find cached rt in the hash table inside passed in rt
1783 * Caller has to hold rcu_read_lock()
1784 */
1785static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1786 const struct in6_addr *daddr,
1787 const struct in6_addr *saddr)
1788{
1789 const struct in6_addr *src_key = NULL;
1790 struct rt6_exception_bucket *bucket;
1791 struct rt6_exception *rt6_ex;
1792 struct rt6_info *ret = NULL;
1793
1794#ifdef CONFIG_IPV6_SUBTREES
1795 /* fib6i_src.plen != 0 indicates f6i is in subtree
1796 * and exception table is indexed by a hash of
1797 * both fib6_dst and fib6_src.
1798 * However, the src addr used to create the hash
1799 * might not be exactly the passed in saddr which
1800 * is a /128 addr from the flow.
1801 * So we need to use f6i->fib6_src to redo lookup
1802 * if the passed in saddr does not find anything.
1803 * (See the logic in ip6_rt_cache_alloc() on how
1804 * rt->rt6i_src is updated.)
1805 */
1806 if (res->f6i->fib6_src.plen)
1807 src_key = saddr;
1808find_ex:
1809#endif
1810 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1811 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1812
1813 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1814 ret = rt6_ex->rt6i;
1815
1816#ifdef CONFIG_IPV6_SUBTREES
1817 /* Use fib6_src as src_key and redo lookup */
1818 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1819 src_key = &res->f6i->fib6_src.addr;
1820 goto find_ex;
1821 }
1822#endif
1823
1824 return ret;
1825}
1826
1827/* Remove the passed in cached rt from the hash table that contains it */
1828static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1829 const struct rt6_info *rt)
1830{
1831 const struct in6_addr *src_key = NULL;
1832 struct rt6_exception_bucket *bucket;
1833 struct rt6_exception *rt6_ex;
1834 int err;
1835
1836 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1837 return -ENOENT;
1838
1839 spin_lock_bh(&rt6_exception_lock);
1840 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1841
1842#ifdef CONFIG_IPV6_SUBTREES
1843 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1844 * and exception table is indexed by a hash of
1845 * both rt6i_dst and rt6i_src.
1846 * Otherwise, the exception table is indexed by
1847 * a hash of only rt6i_dst.
1848 */
1849 if (plen)
1850 src_key = &rt->rt6i_src.addr;
1851#endif
1852 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1853 &rt->rt6i_dst.addr,
1854 src_key);
1855 if (rt6_ex) {
1856 rt6_remove_exception(bucket, rt6_ex);
1857 err = 0;
1858 } else {
1859 err = -ENOENT;
1860 }
1861
1862 spin_unlock_bh(&rt6_exception_lock);
1863 return err;
1864}
1865
1866struct fib6_nh_excptn_arg {
1867 struct rt6_info *rt;
1868 int plen;
1869};
1870
1871static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1872{
1873 struct fib6_nh_excptn_arg *arg = _arg;
1874 int err;
1875
1876 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1877 if (err == 0)
1878 return 1;
1879
1880 return 0;
1881}
1882
1883static int rt6_remove_exception_rt(struct rt6_info *rt)
1884{
1885 struct fib6_info *from;
1886
1887 from = rcu_dereference(rt->from);
1888 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1889 return -EINVAL;
1890
1891 if (from->nh) {
1892 struct fib6_nh_excptn_arg arg = {
1893 .rt = rt,
1894 .plen = from->fib6_src.plen
1895 };
1896 int rc;
1897
1898 /* rc = 1 means an entry was found */
1899 rc = nexthop_for_each_fib6_nh(from->nh,
1900 rt6_nh_remove_exception_rt,
1901 &arg);
1902 return rc ? 0 : -ENOENT;
1903 }
1904
1905 return fib6_nh_remove_exception(from->fib6_nh,
1906 from->fib6_src.plen, rt);
1907}
1908
1909/* Find rt6_ex which contains the passed in rt cache and
1910 * refresh its stamp
1911 */
1912static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1913 const struct rt6_info *rt)
1914{
1915 const struct in6_addr *src_key = NULL;
1916 struct rt6_exception_bucket *bucket;
1917 struct rt6_exception *rt6_ex;
1918
1919 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1920#ifdef CONFIG_IPV6_SUBTREES
1921 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1922 * and exception table is indexed by a hash of
1923 * both rt6i_dst and rt6i_src.
1924 * Otherwise, the exception table is indexed by
1925 * a hash of only rt6i_dst.
1926 */
1927 if (plen)
1928 src_key = &rt->rt6i_src.addr;
1929#endif
1930 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1931 if (rt6_ex)
1932 rt6_ex->stamp = jiffies;
1933}
1934
1935struct fib6_nh_match_arg {
1936 const struct net_device *dev;
1937 const struct in6_addr *gw;
1938 struct fib6_nh *match;
1939};
1940
1941/* determine if fib6_nh has given device and gateway */
1942static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1943{
1944 struct fib6_nh_match_arg *arg = _arg;
1945
1946 if (arg->dev != nh->fib_nh_dev ||
1947 (arg->gw && !nh->fib_nh_gw_family) ||
1948 (!arg->gw && nh->fib_nh_gw_family) ||
1949 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1950 return 0;
1951
1952 arg->match = nh;
1953
1954 /* found a match, break the loop */
1955 return 1;
1956}
1957
1958static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1959{
1960 struct fib6_info *from;
1961 struct fib6_nh *fib6_nh;
1962
1963 rcu_read_lock();
1964
1965 from = rcu_dereference(rt->from);
1966 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1967 goto unlock;
1968
1969 if (from->nh) {
1970 struct fib6_nh_match_arg arg = {
1971 .dev = rt->dst.dev,
1972 .gw = &rt->rt6i_gateway,
1973 };
1974
1975 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1976
1977 if (!arg.match)
1978 goto unlock;
1979 fib6_nh = arg.match;
1980 } else {
1981 fib6_nh = from->fib6_nh;
1982 }
1983 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1984unlock:
1985 rcu_read_unlock();
1986}
1987
1988static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1989 struct rt6_info *rt, int mtu)
1990{
1991 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1992 * lowest MTU in the path: always allow updating the route PMTU to
1993 * reflect PMTU decreases.
1994 *
1995 * If the new MTU is higher, and the route PMTU is equal to the local
1996 * MTU, this means the old MTU is the lowest in the path, so allow
1997 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1998 * handle this.
1999 */
2000
2001 if (dst_mtu(&rt->dst) >= mtu)
2002 return true;
2003
2004 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
2005 return true;
2006
2007 return false;
2008}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}

#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* We are pruning and obsoleting aged-out and non-gateway exceptions
	 * even if other holders still have references to them, so that on the
	 * next dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
	 * expired, independently of their aging, as per RFC 8201 section 4.
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
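
/* Illustrative sketch (not built): the aging rules above reduce to the
 * following predicate for a cached exception 'rt' at time 'now', with
 * 'timeout' taken from the gc args:
 *
 *	bool expired = (rt->rt6i_flags & RTF_EXPIRES) ?
 *		       time_after(now, rt->dst.expires) :
 *		       time_after_eq(now, rt->dst.lastuse + timeout);
 *
 * plus the extra purge of RTF_GATEWAY entries whose neighbour has lost
 * NTF_ROUTER. Anything that survives bumps gc_args->more.
 */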

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

struct fib6_nh_age_excptn_arg {
	struct fib6_gc_args *gc_args;
	unsigned long now;
};

static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_age_excptn_arg *arg = _arg;

	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
	return 0;
}

void rt6_age_exceptions(struct fib6_info *f6i,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	if (f6i->nh) {
		struct fib6_nh_age_excptn_arg arg = {
			.gc_args = gc_args,
			.now = now
		};

		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
					 &arg);
	} else {
		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
	}
}

/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}
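
/* Minimal usage sketch (illustrative), assuming the caller already holds
 * the RCU read lock as required above; 'table' and 'fl6' are hypothetical:
 *
 *	struct fib6_result res = {};
 *
 *	rcu_read_lock();
 *	fib6_table_lookup(net, table, fl6->flowi6_oif, fl6, &res,
 *			  RT6_LOOKUP_F_IFACE);
 *	if (res.f6i != net->ipv6.fib6_null_entry)
 *		... use res.f6i / res.nh while still under RCU ...
 *	rcu_read_unlock();
 */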

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt = NULL;
	int strict = 0;

	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
		     !rcu_read_lock_held());

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry)
		goto out;

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		goto out;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree. It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look up the route here.
		 */
		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		if (rt) {
			/* 1 refcnt is taken during ip6_rt_cache_alloc().
			 * As rt6_uncached_list_add() does not consume refcnt,
			 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
			rt6_uncached_list_add(rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
			rcu_read_unlock();

			return rt;
		}
	} else {
		/* Get a percpu copy */
		local_bh_disable();
		rt = rt6_get_pcpu_route(&res);

		if (!rt)
			rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
	}
out:
	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
		ip6_hold_safe(net, &rt);
	rcu_read_unlock();

	return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
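
/* Caller-side sketch (illustrative): with RT6_LOOKUP_F_DST_NOREF the
 * returned dst borrows the RCU read-side critical section instead of a
 * refcount (see the WARN_ON_ONCE above), so the caller must hold
 * rcu_read_lock() across its use:
 *
 *	rcu_read_lock();
 *	rt = ip6_pol_route(net, table, oif, fl6, skb,
 *			   RT6_LOOKUP_F_DST_NOREF);
 *	... use rt->dst without taking a reference ...
 *	rcu_read_unlock();
 */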

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	if (!icmpv6_is_err(icmph->icmp6_type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
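
/* Worked example of what the helper above achieves (illustrative): for an
 * ICMPv6 error about a flow A -> B, the keys are taken from the inner
 * (offending) header, so the error hashes onto the same multipath
 * nexthop as the flow it refers to:
 *
 *	outer: saddr = router, daddr = A, nexthdr = IPPROTO_ICMPV6
 *	inner: saddr = A, daddr = B, nexthdr = IPPROTO_TCP
 *	keys used for hashing: src = A, dst = B, proto = TCP
 */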

static u32 rt6_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = ip6_multipath_hash_fields(net);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
		hash_keys.tags.flow_label = keys.tags.flow_label;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return flow_hash_from_keys(&hash_keys);
}

static u32 rt6_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = ip6_multipath_hash_fields(net);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return flow_hash_from_keys(&hash_keys);
}

static u32 rt6_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}

static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
					 const struct flowi6 *fl6)
{
	u32 hash_fields = ip6_multipath_hash_fields(net);
	struct flow_keys hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v6addrs.src = fl6->saddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v6addrs.dst = fl6->daddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = fl6->flowi6_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
		hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = fl6->fl6_sport;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = fl6->fl6_dport;

	return flow_hash_from_keys(&hash_keys);
}

/* If skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash = 0;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			struct flow_keys keys;

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, 0);
				flkeys = &keys;
			}

			/* Inner can be v4 or v6 */
			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
				hash_keys.tags.flow_label = flkeys->tags.flow_label;
				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 3:
		if (skb)
			mhash = rt6_multipath_custom_hash_skb(net, skb);
		else
			mhash = rt6_multipath_custom_hash_fl6(net, fl6);
		break;
	}

	return mhash >> 1;
}
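
/* Usage sketch (illustrative): picking a sibling route for a forwarded
 * ICMPv6 error, as ip6_route_input() does below; the per-netns
 * fib_multipath_hash_policy selects among the L3 (0), L4 (1),
 * inner-header (2) and custom-fields (3) cases above:
 *
 *	if (fl6.flowi6_proto == IPPROTO_ICMPV6)
 *		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
 *
 * The final '>> 1' keeps the value within the positive range of the
 * signed nexthop upper-bound comparison done by fib6_select_path().
 */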

/* Called with rcu held */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
						      &fl6, skb, flags));
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags_noref(struct net *net,
					       const struct sock *sk,
					       struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		/* This function does not take refcnt on the dst */
		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	flags |= RT6_LOOKUP_F_DST_NOREF;
	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);

struct dst_entry *ip6_route_output_flags(struct net *net,
					 const struct sock *sk,
					 struct flowi6 *fl6,
					 int flags)
{
	struct dst_entry *dst;
	struct rt6_info *rt6;

	rcu_read_lock();
	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
	rt6 = (struct rt6_info *)dst;
	/* For dst cached in uncached_list, refcnt is already taken. */
	if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
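
/* Typical caller sketch (illustrative; error handling trimmed).
 * ip6_route_output() is the flags == 0 inline wrapper around the
 * function above:
 *
 *	struct flowi6 fl6 = {
 *		.daddr = *daddr,
 *		.flowi6_oif = oif,
 *	};
 *	struct dst_entry *dst;
 *
 *	dst = ip6_route_output(net, sk, &fl6);
 *	if (dst->error) {
 *		err = dst->error;
 *		dst_release(dst);
 *		return err;
 *	}
 *	... transmit via dst ...
 *	dst_release(dst);
 */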

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *)dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *	Destination cache support functions
 */

static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 rt_cookie = 0;

	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
		return false;

	if (fib6_check_expired(f6i))
		return false;

	return true;
}

static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 rt_cookie = 0;

	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
	    rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    fib6_check(from, cookie))
		return &rt->dst;
	else
		return NULL;
}

INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
							u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	if (rt->sernum)
		return rt6_is_valid(rt) ? dst : NULL;

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	if (from && (rt->rt6i_flags & RTF_PCPU ||
		     unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
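
/* Sketch of the cookie protocol (illustrative): a user of a cached dst
 * stores the fib6 sernum cookie at lookup time and passes it back here,
 * so any tree change since then invalidates the entry:
 *
 *	u32 cookie = rt6_get_cookie((struct rt6_info *)dst);
 *	...
 *	if (!dst->ops->check(dst, cookie))
 *		... dst is stale, redo the route lookup ...
 */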

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *)skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}

static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu,
				 bool confirm_neigh)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
	 * [see also comment in rt6_mtu_change_route()]
	 */

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}

	if (confirm_neigh)
		dst_confirm_neigh(dst, daddr);

	if (mtu < IPV6_MIN_MTU)
		return;
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i)
			goto out_unlock;

		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		if (res.f6i->nh) {
			struct fib6_nh_match_arg arg = {
				.dev = dst->dev,
				.gw = &rt6->rt6i_gateway,
			};

			nexthop_for_each_fib6_nh(res.f6i->nh,
						 fib6_nh_find_match, &arg);

			/* fib6_info uses a nexthop that does not have fib6_nh
			 * using the dst->dev + gw. Should be impossible.
			 */
			if (!arg.match)
				goto out_unlock;

			res.nh = arg.match;
		} else {
			res.nh = res.f6i->fib6_nh;
		}

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
out_unlock:
		rcu_read_unlock();
	}
}
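
/* Hypothetical direct use of the dst_ops hook above (illustrative): a
 * driver that learned a smaller path MTU could feed it back this way;
 * values below IPV6_MIN_MTU (1280) or not smaller than the current
 * dst_mtu() are ignored by the function above:
 *
 *	if (mtu >= IPV6_MIN_MTU && mtu < dst_mtu(dst))
 *		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
 */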

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
			     confirm_neigh);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
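
/* Usage sketch (illustrative): an ICMPv6 "packet too big" handler for an
 * encapsulated packet might update the route for the inner header with:
 *
 *	ip6_update_pmtu(skb, dev_net(skb->dev), htonl(new_mtu), 0,
 *			skb->mark, sock_net_uid(dev_net(skb->dev), NULL));
 *
 * Note the mtu argument is passed in network byte order, matching the
 * field layout in the ICMPv6 header; the function above converts it
 * with ntohl().
 */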

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}

static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an ip redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}

struct fib6_nh_rd_arg {
	struct fib6_result *res;
	struct flowi6 *fl6;
	const struct in6_addr *gw;
	struct rt6_info **ret;
};

static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_rd_arg *arg = _arg;

	arg->res->nh = nh;
	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
}

/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL;
	struct fib6_result res = {};
	struct fib6_nh_rd_arg arg = {
		.res = &res,
		.fl6 = fl6,
		.gw = &rdfl->gateway,
		.ret = &ret
	};
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* l3mdev_update_flow overrides oif if the device is enslaved; in
	 * this case we must match on the real ingress device, so reset it
	 */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		fl6->flowi6_oif = skb->dev->ifindex;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from the appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		res.f6i = rt;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (unlikely(rt->nh)) {
			if (nexthop_is_blackhole(rt->nh))
				continue;
			/* on match, res->nh is filled in and potentially ret */
			if (nexthop_for_each_fib6_nh(rt->nh,
						     fib6_nh_redirect_match,
						     &arg))
				goto out;
		} else {
			res.nh = rt->fib6_nh;
			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
						  &ret))
				goto out;
		}
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	res.f6i = rt;
	res.nh = rt->fib6_nh;
out:
	if (ret) {
		ip6_hold_safe(net, &ret);
	} else {
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;
		ret = ip6_create_rt_rcu(&res);
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, &res, table, fl6);
	return ret;
}

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, skb,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
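
/* Worked numbers (illustrative): for a standard Ethernet dst_mtu() of
 * 1500 this yields 1500 - 40 (ipv6hdr) - 20 (tcphdr) = 1440. A result
 * larger than IPV6_MAXPLEN - 20 is clamped up to IPV6_MAXPLEN (65535),
 * the "any MSS, rely on pmtu discovery" sentinel described above.
 */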

INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
EXPORT_INDIRECT_CALLABLE(ip6_mtu);

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
{
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
	struct inet6_dev *idev;
	struct rt6_info *rt;
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
		struct net_device *dev = nh->fib_nh_dev;

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
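
/* The selection order above, as a sketch (illustrative):
 *
 *	if (route MTU metric is locked and set)
 *		mtu = f6i->fib6_pmtu;
 *	else if (a cached nexthop exception matches)
 *		mtu = exception's RTAX_MTU;
 *	else
 *		mtu = egress idev->cnf.mtu6 (at least IPV6_MIN_MTU);
 *	mtu = min(mtu, IP6_MAX_MTU) - lwtunnel_headroom(...);
 */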

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * properly release the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (entries > rt_max_size)
		entries = dst_entries_get_slow(ops);

	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
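
/* Worked numbers for the backoff above (illustrative, assuming the
 * default gc_timeout of 60s and gc_elasticity of 9): ip6_rt_gc_expire is
 * the age threshold handed to fib6_run_gc(), so a smaller value ages
 * cached entries out sooner. A pass that brings the table back under
 * gc_thresh resets it to gc_timeout / 2 (30s), and every invocation then
 * decays it by expire >> 9 (about 0.2%), so sustained cache pressure
 * keeps tightening the threshold.
 */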

static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
			       const struct in6_addr *gw_addr, u32 tbid,
			       int flags, struct fib6_result *res)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	int err;

	table = fib6_get_table(net, tbid);
	if (!table)
		return -EINVAL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;

	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
	if (!err && res->f6i != net->ipv6.fib6_null_entry)
		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);

	return err;
}

static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct fib6_result res = {};
	int err;

	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
	if (!err && !(res.fib6_flags & RTF_REJECT) &&
	    /* ignore match if it is the default route */
	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop has invalid gateway or device mismatch");
		err = -EINVAL;
	}

	return err;
}

static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	int flags = RT6_LOOKUP_F_IFACE;
	struct fib6_result res = {};
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		err = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags, &res);
		/* gw_addr must not require a gateway or resolve to a reject
		 * route. If a device is given, it must match the result.
		 */
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family ||
		    (dev && dev != res.nh->fib_nh_dev))
			err = -EHOSTUNREACH;
	}

	if (err < 0) {
		struct flowi6 fl6 = {
			.flowi6_oif = cfg->fc_ifindex,
			.daddr = *gw_addr,
		};

		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family)
			err = -EHOSTUNREACH;

		if (err)
			return err;

		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);
	}

	err = 0;
	if (dev) {
		if (dev != res.nh->fib_nh_dev)
			err = -EHOSTUNREACH;
	} else {
		*_dev = dev = res.nh->fib_nh_dev;
		dev_hold(dev);
		*idev = in6_dev_get(dev);
	}

	return err;
}

static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using non-link-local
		 * addresses as nexthop addresses.
		 * Otherwise, a router will not be able to send redirects.
		 * That is usually desirable, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		rcu_read_lock();

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		rcu_read_unlock();

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}

static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
{
	if ((flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(flags & (RTF_ANYCAST | RTF_LOCAL))))
		return true;

	return false;
}

int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
		 struct fib6_config *cfg, gfp_t gfp_flags,
		 struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	int addr_type;
	int err;

	fib6_nh->fib_nh_family = AF_INET6;
#ifdef CONFIG_IPV6_ROUTER_PREF
	fib6_nh->last_probe = jiffies;
#endif
	if (cfg->fc_is_fdb) {
		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
		return 0;
	}

	err = -ENODEV;
	if (cfg->fc_ifindex) {
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}

		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
	}

	fib6_nh->fib_nh_weight = 1;

	/* We cannot add true routes via loopback here; they would
	 * result in kernel looping, so promote them to reject routes.
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		goto pcpu_alloc;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;

	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, gfp_flags, extack);
	if (err)
		goto out;

pcpu_alloc:
	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
	if (!fib6_nh->rt6i_pcpu) {
		err = -ENOMEM;
		goto out;
	}

	fib6_nh->fib_nh_dev = dev;
	fib6_nh->fib_nh_oif = dev->ifindex;
	err = 0;
out:
	if (idev)
		in6_dev_put(idev);

	if (err) {
		lwtstate_put(fib6_nh->fib_nh_lws);
		fib6_nh->fib_nh_lws = NULL;
		if (dev)
			dev_put(dev);
	}

	return err;
}

void fib6_nh_release(struct fib6_nh *fib6_nh)
{
	struct rt6_exception_bucket *bucket;

	rcu_read_lock();

	fib6_nh_flush_exceptions(fib6_nh, NULL);
	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
	if (bucket) {
		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
		kfree(bucket);
	}

	rcu_read_unlock();

	if (fib6_nh->rt6i_pcpu) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct rt6_info **ppcpu_rt;
			struct rt6_info *pcpu_rt;

			ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
			pcpu_rt = *ppcpu_rt;
			if (pcpu_rt) {
				dst_dev_put(&pcpu_rt->dst);
				dst_release(&pcpu_rt->dst);
				*ppcpu_rt = NULL;
			}
		}

		free_percpu(fib6_nh->rt6i_pcpu);
	}

	fib_nh_common_release(&fib6_nh->nh_common);
}

static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
					       gfp_t gfp_flags,
					       struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct fib6_info *rt = NULL;
	struct nexthop *nh = NULL;
	struct fib6_table *table;
	struct fib6_nh *fib6_nh;
	int err = -EINVAL;
	int addr_type;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_nh_id) {
		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
			goto out;
		}
		err = fib6_check_nexthop(nh, cfg, extack);
		if (err)
			goto out;
	}

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	err = -ENOMEM;
	rt = fib6_info_alloc(gfp_flags, !nh);
	if (!rt)
		goto out;

	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
					       extack);
	if (IS_ERR(rt->fib6_metrics)) {
		err = PTR_ERR(rt->fib6_metrics);
		/* Do not leave garbage there. */
		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
		goto out_free;
	}

	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;

	if (cfg->fc_flags & RTF_EXPIRES)
		fib6_set_expires(rt, jiffies +
				 clock_t_to_jiffies(cfg->fc_expires));
	else
		fib6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->fib6_protocol = cfg->fc_protocol;

	rt->fib6_table = table;
	rt->fib6_metric = cfg->fc_metric;
	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;

	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
#endif
	if (nh) {
		if (rt->fib6_src.plen) {
			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
			goto out_free;
		}
		if (!nexthop_get(nh)) {
			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
			goto out_free;
		}
		rt->nh = nh;
		fib6_nh = nexthop_fib6_nh(rt->nh);
	} else {
		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
		if (err)
			goto out;

		fib6_nh = rt->fib6_nh;

		/* We cannot add true routes via loopback here, they would
		 * result in kernel looping; promote them to reject routes
		 */
		addr_type = ipv6_addr_type(&cfg->fc_dst);
		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
				   addr_type))
			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
	}

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		struct net_device *dev = fib6_nh->fib_nh_dev;

		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
	} else
		rt->fib6_prefsrc.plen = 0;

	return rt;
out:
	fib6_info_release(rt);
	return ERR_PTR(err);
out_free:
	ip_fib_metrics_put(rt->fib6_metrics);
	kfree(rt);
	return ERR_PTR(err);
}

int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
		  struct netlink_ext_ack *extack)
{
	struct fib6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, gfp_flags, extack);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
	fib6_info_release(rt);

	return err;
}
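
/* Usage sketch (illustrative): adding a gatewayed /64 the way in-kernel
 * callers do; 'prefix', 'gw', 'dev' and 'net' are hypothetical, and
 * fields not shown are zero:
 *
 *	struct fib6_config cfg = {
 *		.fc_table = RT6_TABLE_MAIN,
 *		.fc_ifindex = dev->ifindex,
 *		.fc_dst = prefix,
 *		.fc_dst_len = 64,
 *		.fc_gateway = gw,
 *		.fc_flags = RTF_GATEWAY | RTF_UP,
 *		.fc_protocol = RTPROT_BOOT,
 *		.fc_nlinfo = { .nl_net = net },
 *	};
 *
 *	err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
 */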

static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
{
	struct net *net = info->nl_net;
	struct fib6_table *table;
	int err;

	if (rt == net->ipv6.fib6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	spin_unlock_bh(&table->tb6_lock);

out:
	fib6_info_release(rt);
	return err;
}

int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
{
	struct nl_info info = {
		.nl_net = net,
		.skip_notify = skip_notify
	};

	return __ip6_del_rt(rt, &info);
}

static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.fib6_null_entry)
		goto out_put;
	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
		struct fib6_info *sibling, *next_sibling;
		struct fib6_node *fn;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt, NULL,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		/* 'rt' points to the first sibling route. If it is not the
		 * leaf, then we do not need to send a notification. Otherwise,
		 * we need to check if the last sibling has a next route or not
		 * and emit a replace or delete notification, respectively.
		 */
		info->skip_notify_kernel = 1;
		fn = rcu_dereference_protected(rt->fib6_node,
					       lockdep_is_held(&table->tb6_lock));
		if (rcu_access_pointer(fn->leaf) == rt) {
			struct fib6_info *last_sibling, *replace_rt;

			last_sibling = list_last_entry(&rt->fib6_siblings,
						       struct fib6_info,
						       fib6_siblings);
			replace_rt = rcu_dereference_protected(
					last_sibling->fib6_next,
					lockdep_is_held(&table->tb6_lock));
			if (replace_rt)
				call_fib6_entry_notifiers_replace(net,
								  replace_rt);
			else
				call_fib6_multipath_entry_notifiers(net,
						FIB_EVENT_ENTRY_DEL,
						rt, rt->fib6_nsiblings,
						NULL);
		}
		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings,
					 fib6_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	fib6_info_release(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
{
	int rc = -ESRCH;

	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
		goto out;

	if (cfg->fc_flags & RTF_GATEWAY &&
	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
		goto out;

	rc = rt6_remove_exception_rt(rt);
out:
	return rc;
}

static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
			     struct fib6_nh *nh)
{
	struct fib6_result res = {
		.f6i = rt,
		.nh = nh,
	};
	struct rt6_info *rt_cache;

	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
	if (rt_cache)
		return __ip6_del_cached_rt(rt_cache, cfg);

	return 0;
}

struct fib6_nh_del_cached_rt_arg {
	struct fib6_config *cfg;
	struct fib6_info *f6i;
};

static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_del_cached_rt_arg *arg = _arg;
	int rc;

	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
	return rc != -ESRCH ? rc : 0;
}

static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
{
	struct fib6_nh_del_cached_rt_arg arg = {
		.cfg = cfg,
		.f6i = f6i
	};

	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
}
4042
4043static int ip6_route_del(struct fib6_config *cfg,
4044 struct netlink_ext_ack *extack)
4045{
4046 struct fib6_table *table;
4047 struct fib6_info *rt;
4048 struct fib6_node *fn;
4049 int err = -ESRCH;
4050
4051 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4052 if (!table) {
4053 NL_SET_ERR_MSG(extack, "FIB table does not exist");
4054 return err;
4055 }
4056
4057 rcu_read_lock();
4058
4059 fn = fib6_locate(&table->tb6_root,
4060 &cfg->fc_dst, cfg->fc_dst_len,
4061 &cfg->fc_src, cfg->fc_src_len,
4062 !(cfg->fc_flags & RTF_CACHE));
4063
4064 if (fn) {
4065 for_each_fib6_node_rt_rcu(fn) {
4066 struct fib6_nh *nh;
4067
4068 if (rt->nh && cfg->fc_nh_id &&
4069 rt->nh->id != cfg->fc_nh_id)
4070 continue;
4071
4072 if (cfg->fc_flags & RTF_CACHE) {
4073 int rc = 0;
4074
4075 if (rt->nh) {
4076 rc = ip6_del_cached_rt_nh(cfg, rt);
4077 } else if (cfg->fc_nh_id) {
4078 continue;
4079 } else {
4080 nh = rt->fib6_nh;
4081 rc = ip6_del_cached_rt(cfg, rt, nh);
4082 }
4083 if (rc != -ESRCH) {
4084 rcu_read_unlock();
4085 return rc;
4086 }
4087 continue;
4088 }
4089
4090 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4091 continue;
4092 if (cfg->fc_protocol &&
4093 cfg->fc_protocol != rt->fib6_protocol)
4094 continue;
4095
4096 if (rt->nh) {
4097 if (!fib6_info_hold_safe(rt))
4098 continue;
4099 rcu_read_unlock();
4100
4101 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4102 }
4103 if (cfg->fc_nh_id)
4104 continue;
4105
4106 nh = rt->fib6_nh;
4107 if (cfg->fc_ifindex &&
4108 (!nh->fib_nh_dev ||
4109 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4110 continue;
4111 if (cfg->fc_flags & RTF_GATEWAY &&
4112 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4113 continue;
4114 if (!fib6_info_hold_safe(rt))
4115 continue;
4116 rcu_read_unlock();
4117
4118			/* if a gateway was specified, delete only that single route */
4119 if (cfg->fc_flags & RTF_GATEWAY)
4120 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
4121
4122 return __ip6_del_rt_siblings(rt, cfg);
4123 }
4124 }
4125 rcu_read_unlock();
4126
4127 return err;
4128}
4129
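/* Handle an ICMPv6 Redirect message: validate it, update the
 * neighbour cache for the target, install an RTF_CACHE exception
 * route via the new nexthop and raise a NETEVENT_REDIRECT
 * notification.
 */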
4130static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4131{
4132 struct netevent_redirect netevent;
4133 struct rt6_info *rt, *nrt = NULL;
4134 struct fib6_result res = {};
4135 struct ndisc_options ndopts;
4136 struct inet6_dev *in6_dev;
4137 struct neighbour *neigh;
4138 struct rd_msg *msg;
4139 int optlen, on_link;
4140 u8 *lladdr;
4141
4142 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4143 optlen -= sizeof(*msg);
4144
4145 if (optlen < 0) {
4146 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4147 return;
4148 }
4149
4150 msg = (struct rd_msg *)icmp6_hdr(skb);
4151
4152 if (ipv6_addr_is_multicast(&msg->dest)) {
4153 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4154 return;
4155 }
4156
4157 on_link = 0;
4158 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4159 on_link = 1;
4160 } else if (ipv6_addr_type(&msg->target) !=
4161 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4162 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4163 return;
4164 }
4165
4166 in6_dev = __in6_dev_get(skb->dev);
4167 if (!in6_dev)
4168 return;
4169 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4170 return;
4171
4172 /* RFC2461 8.1:
4173 * The IP source address of the Redirect MUST be the same as the current
4174 * first-hop router for the specified ICMP Destination Address.
4175 */
4176
4177 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4178 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4179 return;
4180 }
4181
4182 lladdr = NULL;
4183 if (ndopts.nd_opts_tgt_lladdr) {
4184 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4185 skb->dev);
4186 if (!lladdr) {
4187 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4188 return;
4189 }
4190 }
4191
4192 rt = (struct rt6_info *) dst;
4193 if (rt->rt6i_flags & RTF_REJECT) {
4194 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4195 return;
4196 }
4197
4198 /* Redirect received -> path was valid.
4199 * Look, redirects are sent only in response to data packets,
4200	 * so this nexthop is apparently reachable. --ANK
4201 */
4202 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4203
4204 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4205 if (!neigh)
4206 return;
4207
4208 /*
4209 * We have finally decided to accept it.
4210 */
4211
4212 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4213 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4214 NEIGH_UPDATE_F_OVERRIDE|
4215 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4216 NEIGH_UPDATE_F_ISROUTER)),
4217 NDISC_REDIRECT, &ndopts);
4218
4219 rcu_read_lock();
4220 res.f6i = rcu_dereference(rt->from);
4221 if (!res.f6i)
4222 goto out;
4223
4224 if (res.f6i->nh) {
4225 struct fib6_nh_match_arg arg = {
4226 .dev = dst->dev,
4227 .gw = &rt->rt6i_gateway,
4228 };
4229
4230 nexthop_for_each_fib6_nh(res.f6i->nh,
4231 fib6_nh_find_match, &arg);
4232
4233		/* The fib6_info uses a nexthop that has no fib6_nh
4234		 * using dst->dev. This should be impossible.
4235 */
4236 if (!arg.match)
4237 goto out;
4238 res.nh = arg.match;
4239 } else {
4240 res.nh = res.f6i->fib6_nh;
4241 }
4242
4243 res.fib6_flags = res.f6i->fib6_flags;
4244 res.fib6_type = res.f6i->fib6_type;
4245 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4246 if (!nrt)
4247 goto out;
4248
4249 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4250 if (on_link)
4251 nrt->rt6i_flags &= ~RTF_GATEWAY;
4252
4253 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4254
4255 /* rt6_insert_exception() will take care of duplicated exceptions */
4256 if (rt6_insert_exception(nrt, &res)) {
4257 dst_release_immediate(&nrt->dst);
4258 goto out;
4259 }
4260
4261 netevent.old = &rt->dst;
4262 netevent.new = &nrt->dst;
4263 netevent.daddr = &msg->dest;
4264 netevent.neigh = neigh;
4265 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4266
4267out:
4268 rcu_read_unlock();
4269 neigh_release(neigh);
4270}
4271
4272#ifdef CONFIG_IPV6_ROUTE_INFO
4273static struct fib6_info *rt6_get_route_info(struct net *net,
4274 const struct in6_addr *prefix, int prefixlen,
4275 const struct in6_addr *gwaddr,
4276 struct net_device *dev)
4277{
4278 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4279 int ifindex = dev->ifindex;
4280 struct fib6_node *fn;
4281 struct fib6_info *rt = NULL;
4282 struct fib6_table *table;
4283
4284 table = fib6_get_table(net, tb_id);
4285 if (!table)
4286 return NULL;
4287
4288 rcu_read_lock();
4289 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4290 if (!fn)
4291 goto out;
4292
4293 for_each_fib6_node_rt_rcu(fn) {
4294 /* these routes do not use nexthops */
4295 if (rt->nh)
4296 continue;
4297 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4298 continue;
4299 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4300 !rt->fib6_nh->fib_nh_gw_family)
4301 continue;
4302 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4303 continue;
4304 if (!fib6_info_hold_safe(rt))
4305 continue;
4306 break;
4307 }
4308out:
4309 rcu_read_unlock();
4310 return rt;
4311}
4312
4313static struct fib6_info *rt6_add_route_info(struct net *net,
4314 const struct in6_addr *prefix, int prefixlen,
4315 const struct in6_addr *gwaddr,
4316 struct net_device *dev,
4317 unsigned int pref)
4318{
4319 struct fib6_config cfg = {
4320 .fc_metric = IP6_RT_PRIO_USER,
4321 .fc_ifindex = dev->ifindex,
4322 .fc_dst_len = prefixlen,
4323 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4324 RTF_UP | RTF_PREF(pref),
4325 .fc_protocol = RTPROT_RA,
4326 .fc_type = RTN_UNICAST,
4327 .fc_nlinfo.portid = 0,
4328 .fc_nlinfo.nlh = NULL,
4329 .fc_nlinfo.nl_net = net,
4330 };
4331
4332 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4333 cfg.fc_dst = *prefix;
4334 cfg.fc_gateway = *gwaddr;
4335
4336 /* We should treat it as a default route if prefix length is 0. */
4337 if (!prefixlen)
4338 cfg.fc_flags |= RTF_DEFAULT;
4339
4340 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4341
4342 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4343}
4344#endif
4345
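/* Look up the RA-learned default router entry for @addr on @dev in
 * the default routing table. A reference is taken on the returned
 * entry; the caller must drop it with fib6_info_release().
 */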
4346struct fib6_info *rt6_get_dflt_router(struct net *net,
4347 const struct in6_addr *addr,
4348 struct net_device *dev)
4349{
4350 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4351 struct fib6_info *rt;
4352 struct fib6_table *table;
4353
4354 table = fib6_get_table(net, tb_id);
4355 if (!table)
4356 return NULL;
4357
4358 rcu_read_lock();
4359 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4360 struct fib6_nh *nh;
4361
4362 /* RA routes do not use nexthops */
4363 if (rt->nh)
4364 continue;
4365
4366 nh = rt->fib6_nh;
4367 if (dev == nh->fib_nh_dev &&
4368 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4369 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4370 break;
4371 }
4372 if (rt && !fib6_info_hold_safe(rt))
4373 rt = NULL;
4374 rcu_read_unlock();
4375 return rt;
4376}
4377
4378struct fib6_info *rt6_add_dflt_router(struct net *net,
4379 const struct in6_addr *gwaddr,
4380 struct net_device *dev,
4381 unsigned int pref,
4382 u32 defrtr_usr_metric)
4383{
4384 struct fib6_config cfg = {
4385 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4386 .fc_metric = defrtr_usr_metric,
4387 .fc_ifindex = dev->ifindex,
4388 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4389 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4390 .fc_protocol = RTPROT_RA,
4391 .fc_type = RTN_UNICAST,
4392 .fc_nlinfo.portid = 0,
4393 .fc_nlinfo.nlh = NULL,
4394 .fc_nlinfo.nl_net = net,
4395 };
4396
4397 cfg.fc_gateway = *gwaddr;
4398
4399 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4400 struct fib6_table *table;
4401
4402 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4403 if (table)
4404 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4405 }
4406
4407 return rt6_get_dflt_router(net, gwaddr, dev);
4408}
4409
4410static void __rt6_purge_dflt_routers(struct net *net,
4411 struct fib6_table *table)
4412{
4413 struct fib6_info *rt;
4414
4415restart:
4416 rcu_read_lock();
4417 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4418 struct net_device *dev = fib6_info_nh_dev(rt);
4419 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4420
4421 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4422 (!idev || idev->cnf.accept_ra != 2) &&
4423 fib6_info_hold_safe(rt)) {
4424 rcu_read_unlock();
4425 ip6_del_rt(net, rt, false);
4426 goto restart;
4427 }
4428 }
4429 rcu_read_unlock();
4430
4431 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4432}
4433
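/* Flush addrconf default router entries from every table flagged
 * with RT6_TABLE_HAS_DFLT_ROUTER, skipping interfaces that have
 * accept_ra == 2.
 */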
4434void rt6_purge_dflt_routers(struct net *net)
4435{
4436 struct fib6_table *table;
4437 struct hlist_head *head;
4438 unsigned int h;
4439
4440 rcu_read_lock();
4441
4442 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4443 head = &net->ipv6.fib_table_hash[h];
4444 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4445 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4446 __rt6_purge_dflt_routers(net, table);
4447 }
4448 }
4449
4450 rcu_read_unlock();
4451}
4452
4453static void rtmsg_to_fib6_config(struct net *net,
4454 struct in6_rtmsg *rtmsg,
4455 struct fib6_config *cfg)
4456{
4457 *cfg = (struct fib6_config){
4458 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4459 : RT6_TABLE_MAIN,
4460 .fc_ifindex = rtmsg->rtmsg_ifindex,
4461 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4462 .fc_expires = rtmsg->rtmsg_info,
4463 .fc_dst_len = rtmsg->rtmsg_dst_len,
4464 .fc_src_len = rtmsg->rtmsg_src_len,
4465 .fc_flags = rtmsg->rtmsg_flags,
4466 .fc_type = rtmsg->rtmsg_type,
4467
4468 .fc_nlinfo.nl_net = net,
4469
4470 .fc_dst = rtmsg->rtmsg_dst,
4471 .fc_src = rtmsg->rtmsg_src,
4472 .fc_gateway = rtmsg->rtmsg_gateway,
4473 };
4474}
4475
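/* Legacy SIOCADDRT/SIOCDELRT ioctl handler; requires CAP_NET_ADMIN
 * in the user namespace owning the netns.
 */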
4476int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4477{
4478 struct fib6_config cfg;
4479 int err;
4480
4481 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4482 return -EINVAL;
4483 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4484 return -EPERM;
4485
4486 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4487
4488 rtnl_lock();
4489 switch (cmd) {
4490 case SIOCADDRT:
4491 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4492 break;
4493 case SIOCDELRT:
4494 err = ip6_route_del(&cfg, NULL);
4495 break;
4496 }
4497 rtnl_unlock();
4498 return err;
4499}
4500
4501/*
4502 * Drop the packet on the floor
4503 */
4504
4505static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4506{
4507 struct dst_entry *dst = skb_dst(skb);
4508 struct net *net = dev_net(dst->dev);
4509 struct inet6_dev *idev;
4510 int type;
4511
4512 if (netif_is_l3_master(skb->dev) &&
4513 dst->dev == net->loopback_dev)
4514 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4515 else
4516 idev = ip6_dst_idev(dst);
4517
4518 switch (ipstats_mib_noroutes) {
4519 case IPSTATS_MIB_INNOROUTES:
4520 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4521 if (type == IPV6_ADDR_ANY) {
4522 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4523 break;
4524 }
4525 fallthrough;
4526 case IPSTATS_MIB_OUTNOROUTES:
4527 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4528 break;
4529 }
4530
4531 /* Start over by dropping the dst for l3mdev case */
4532 if (netif_is_l3_master(skb->dev))
4533 skb_dst_drop(skb);
4534
4535 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4536 kfree_skb(skb);
4537 return 0;
4538}
4539
4540static int ip6_pkt_discard(struct sk_buff *skb)
4541{
4542 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4543}
4544
4545static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4546{
4547 skb->dev = skb_dst(skb)->dev;
4548 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4549}
4550
4551static int ip6_pkt_prohibit(struct sk_buff *skb)
4552{
4553 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4554}
4555
4556static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4557{
4558 skb->dev = skb_dst(skb)->dev;
4559 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4560}
4561
4562/*
4563 * Allocate a dst for local (unicast / anycast) address.
4564 */
4565
4566struct fib6_info *addrconf_f6i_alloc(struct net *net,
4567 struct inet6_dev *idev,
4568 const struct in6_addr *addr,
4569 bool anycast, gfp_t gfp_flags)
4570{
4571 struct fib6_config cfg = {
4572 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4573 .fc_ifindex = idev->dev->ifindex,
4574 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4575 .fc_dst = *addr,
4576 .fc_dst_len = 128,
4577 .fc_protocol = RTPROT_KERNEL,
4578 .fc_nlinfo.nl_net = net,
4579 .fc_ignore_dev_down = true,
4580 };
4581 struct fib6_info *f6i;
4582
4583 if (anycast) {
4584 cfg.fc_type = RTN_ANYCAST;
4585 cfg.fc_flags |= RTF_ANYCAST;
4586 } else {
4587 cfg.fc_type = RTN_LOCAL;
4588 cfg.fc_flags |= RTF_LOCAL;
4589 }
4590
4591 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4592 if (!IS_ERR(f6i))
4593 f6i->dst_nocount = true;
4594 return f6i;
4595}
4596
4597/* remove deleted ip from prefsrc entries */
4598struct arg_dev_net_ip {
4599 struct net_device *dev;
4600 struct net *net;
4601 struct in6_addr *addr;
4602};
4603
4604static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4605{
4606 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4607 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4608 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4609
4610 if (!rt->nh &&
4611 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4612 rt != net->ipv6.fib6_null_entry &&
4613 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4614 spin_lock_bh(&rt6_exception_lock);
4615 /* remove prefsrc entry */
4616 rt->fib6_prefsrc.plen = 0;
4617 spin_unlock_bh(&rt6_exception_lock);
4618 }
4619 return 0;
4620}
4621
4622void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4623{
4624 struct net *net = dev_net(ifp->idev->dev);
4625 struct arg_dev_net_ip adni = {
4626 .dev = ifp->idev->dev,
4627 .net = net,
4628 .addr = &ifp->addr,
4629 };
4630 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4631}
4632
4633#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4634
4635/* Remove routers and update dst entries when a gateway turns into a host. */
4636static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4637{
4638 struct in6_addr *gateway = (struct in6_addr *)arg;
4639 struct fib6_nh *nh;
4640
4641 /* RA routes do not use nexthops */
4642 if (rt->nh)
4643 return 0;
4644
4645 nh = rt->fib6_nh;
4646 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4647 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4648 return -1;
4649
4650 /* Further clean up cached routes in exception table.
4651	 * This is needed because a cached route may have a different
4652	 * gateway than its 'parent' in the case of an IP redirect.
4653 */
4654 fib6_nh_exceptions_clean_tohost(nh, gateway);
4655
4656 return 0;
4657}
4658
4659void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4660{
4661 fib6_clean_all(net, fib6_clean_tohost, gateway);
4662}
4663
4664struct arg_netdev_event {
4665 const struct net_device *dev;
4666 union {
4667 unsigned char nh_flags;
4668 unsigned long event;
4669 };
4670};
4671
4672static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4673{
4674 struct fib6_info *iter;
4675 struct fib6_node *fn;
4676
4677 fn = rcu_dereference_protected(rt->fib6_node,
4678 lockdep_is_held(&rt->fib6_table->tb6_lock));
4679 iter = rcu_dereference_protected(fn->leaf,
4680 lockdep_is_held(&rt->fib6_table->tb6_lock));
4681 while (iter) {
4682 if (iter->fib6_metric == rt->fib6_metric &&
4683 rt6_qualify_for_ecmp(iter))
4684 return iter;
4685 iter = rcu_dereference_protected(iter->fib6_next,
4686 lockdep_is_held(&rt->fib6_table->tb6_lock));
4687 }
4688
4689 return NULL;
4690}
4691
4692/* only called for fib entries with builtin fib6_nh */
4693static bool rt6_is_dead(const struct fib6_info *rt)
4694{
4695 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4696 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4697 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4698 return true;
4699
4700 return false;
4701}
4702
4703static int rt6_multipath_total_weight(const struct fib6_info *rt)
4704{
4705 struct fib6_info *iter;
4706 int total = 0;
4707
4708 if (!rt6_is_dead(rt))
4709 total += rt->fib6_nh->fib_nh_weight;
4710
4711 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4712 if (!rt6_is_dead(iter))
4713 total += iter->fib6_nh->fib_nh_weight;
4714 }
4715
4716 return total;
4717}
4718
4719static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4720{
4721 int upper_bound = -1;
4722
4723 if (!rt6_is_dead(rt)) {
4724 *weight += rt->fib6_nh->fib_nh_weight;
4725 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4726 total) - 1;
4727 }
4728 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4729}
4730
4731static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4732{
4733 struct fib6_info *iter;
4734 int weight = 0;
4735
4736 rt6_upper_bound_set(rt, &weight, total);
4737
4738 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4739 rt6_upper_bound_set(iter, &weight, total);
4740}
4741
4742void rt6_multipath_rebalance(struct fib6_info *rt)
4743{
4744 struct fib6_info *first;
4745 int total;
4746
4747	/* If the entire multipath route was marked for flushing, there
4748	 * is no need to rebalance upon the removal of every sibling
4749	 * route.
4750 */
4751 if (!rt->fib6_nsiblings || rt->should_flush)
4752 return;
4753
4754 /* During lookup routes are evaluated in order, so we need to
4755 * make sure upper bounds are assigned from the first sibling
4756 * onwards.
4757 */
4758 first = rt6_multipath_first_sibling(rt);
4759 if (WARN_ON_ONCE(!first))
4760 return;
4761
4762 total = rt6_multipath_total_weight(first);
4763 rt6_multipath_upper_bound_set(first, total);
4764}
4765
4766static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4767{
4768 const struct arg_netdev_event *arg = p_arg;
4769 struct net *net = dev_net(arg->dev);
4770
4771 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4772 rt->fib6_nh->fib_nh_dev == arg->dev) {
4773 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4774 fib6_update_sernum_upto_root(net, rt);
4775 rt6_multipath_rebalance(rt);
4776 }
4777
4778 return 0;
4779}
4780
4781void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4782{
4783 struct arg_netdev_event arg = {
4784 .dev = dev,
4785 {
4786 .nh_flags = nh_flags,
4787 },
4788 };
4789
4790 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4791 arg.nh_flags |= RTNH_F_LINKDOWN;
4792
4793 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4794}
4795
4796/* only called for fib entries with inline fib6_nh */
4797static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4798 const struct net_device *dev)
4799{
4800 struct fib6_info *iter;
4801
4802 if (rt->fib6_nh->fib_nh_dev == dev)
4803 return true;
4804 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4805 if (iter->fib6_nh->fib_nh_dev == dev)
4806 return true;
4807
4808 return false;
4809}
4810
4811static void rt6_multipath_flush(struct fib6_info *rt)
4812{
4813 struct fib6_info *iter;
4814
4815 rt->should_flush = 1;
4816 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4817 iter->should_flush = 1;
4818}
4819
4820static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4821 const struct net_device *down_dev)
4822{
4823 struct fib6_info *iter;
4824 unsigned int dead = 0;
4825
4826 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4827 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4828 dead++;
4829 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4830 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4831 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4832 dead++;
4833
4834 return dead;
4835}
4836
4837static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4838 const struct net_device *dev,
4839 unsigned char nh_flags)
4840{
4841 struct fib6_info *iter;
4842
4843 if (rt->fib6_nh->fib_nh_dev == dev)
4844 rt->fib6_nh->fib_nh_flags |= nh_flags;
4845 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4846 if (iter->fib6_nh->fib_nh_dev == dev)
4847 iter->fib6_nh->fib_nh_flags |= nh_flags;
4848}
4849
4850/* called with write lock held for table with rt */
4851static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4852{
4853 const struct arg_netdev_event *arg = p_arg;
4854 const struct net_device *dev = arg->dev;
4855 struct net *net = dev_net(dev);
4856
4857 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4858 return 0;
4859
4860 switch (arg->event) {
4861 case NETDEV_UNREGISTER:
4862 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4863 case NETDEV_DOWN:
4864 if (rt->should_flush)
4865 return -1;
4866 if (!rt->fib6_nsiblings)
4867 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4868 if (rt6_multipath_uses_dev(rt, dev)) {
4869 unsigned int count;
4870
4871 count = rt6_multipath_dead_count(rt, dev);
4872 if (rt->fib6_nsiblings + 1 == count) {
4873 rt6_multipath_flush(rt);
4874 return -1;
4875 }
4876 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4877 RTNH_F_LINKDOWN);
4878 fib6_update_sernum(net, rt);
4879 rt6_multipath_rebalance(rt);
4880 }
4881 return -2;
4882 case NETDEV_CHANGE:
4883 if (rt->fib6_nh->fib_nh_dev != dev ||
4884 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4885 break;
4886 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4887 rt6_multipath_rebalance(rt);
4888 break;
4889 }
4890
4891 return 0;
4892}
4893
4894void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4895{
4896 struct arg_netdev_event arg = {
4897 .dev = dev,
4898 {
4899 .event = event,
4900 },
4901 };
4902 struct net *net = dev_net(dev);
4903
4904 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4905 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4906 else
4907 fib6_clean_all(net, fib6_ifdown, &arg);
4908}
4909
4910void rt6_disable_ip(struct net_device *dev, unsigned long event)
4911{
4912 rt6_sync_down_dev(dev, event);
4913 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4914 neigh_ifdown(&nd_tbl, dev);
4915}
4916
4917struct rt6_mtu_change_arg {
4918 struct net_device *dev;
4919 unsigned int mtu;
4920 struct fib6_info *f6i;
4921};
4922
4923static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4924{
4925 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4926 struct fib6_info *f6i = arg->f6i;
4927
4928	/* For an administrative MTU increase there is no way to discover
4929	 * an IPv6 PMTU increase, so the PMTU has to be updated here.
4930	 * Since RFC 1981 doesn't cover administrative MTU increases,
4931	 * updating the PMTU on such an increase is a MUST (e.g. jumbo frame).
4932 */
4933 if (nh->fib_nh_dev == arg->dev) {
4934 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4935 u32 mtu = f6i->fib6_pmtu;
4936
4937 if (mtu >= arg->mtu ||
4938 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4939 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4940
4941 spin_lock_bh(&rt6_exception_lock);
4942 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4943 spin_unlock_bh(&rt6_exception_lock);
4944 }
4945
4946 return 0;
4947}
4948
4949static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4950{
4951 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4952 struct inet6_dev *idev;
4953
4954	/* In IPv6, PMTU discovery is not optional,
4955	   so the RTAX_MTU lock cannot disable it.
4956	   We still use this lock to block changes
4957	   caused by addrconf/ndisc.
4958 */
4959
4960 idev = __in6_dev_get(arg->dev);
4961 if (!idev)
4962 return 0;
4963
4964 if (fib6_metric_locked(f6i, RTAX_MTU))
4965 return 0;
4966
4967 arg->f6i = f6i;
4968 if (f6i->nh) {
4969 /* fib6_nh_mtu_change only returns 0, so this is safe */
4970 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4971 arg);
4972 }
4973
4974 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4975}
4976
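/* Propagate a device MTU change to all routes and cached exception
 * entries using @dev.
 */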
4977void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4978{
4979 struct rt6_mtu_change_arg arg = {
4980 .dev = dev,
4981 .mtu = mtu,
4982 };
4983
4984 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4985}
4986
4987static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4988 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4989 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4990 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4991 [RTA_OIF] = { .type = NLA_U32 },
4992 [RTA_IIF] = { .type = NLA_U32 },
4993 [RTA_PRIORITY] = { .type = NLA_U32 },
4994 [RTA_METRICS] = { .type = NLA_NESTED },
4995 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4996 [RTA_PREF] = { .type = NLA_U8 },
4997 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4998 [RTA_ENCAP] = { .type = NLA_NESTED },
4999 [RTA_EXPIRES] = { .type = NLA_U32 },
5000 [RTA_UID] = { .type = NLA_U32 },
5001 [RTA_MARK] = { .type = NLA_U32 },
5002 [RTA_TABLE] = { .type = NLA_U32 },
5003 [RTA_IP_PROTO] = { .type = NLA_U8 },
5004 [RTA_SPORT] = { .type = NLA_U16 },
5005 [RTA_DPORT] = { .type = NLA_U16 },
5006 [RTA_NH_ID] = { .type = NLA_U32 },
5007};
5008
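/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * fib6_config, validating the attributes against rtm_ipv6_policy.
 */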
5009static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5010 struct fib6_config *cfg,
5011 struct netlink_ext_ack *extack)
5012{
5013 struct rtmsg *rtm;
5014 struct nlattr *tb[RTA_MAX+1];
5015 unsigned int pref;
5016 int err;
5017
5018 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5019 rtm_ipv6_policy, extack);
5020 if (err < 0)
5021 goto errout;
5022
5023 err = -EINVAL;
5024 rtm = nlmsg_data(nlh);
5025
5026 *cfg = (struct fib6_config){
5027 .fc_table = rtm->rtm_table,
5028 .fc_dst_len = rtm->rtm_dst_len,
5029 .fc_src_len = rtm->rtm_src_len,
5030 .fc_flags = RTF_UP,
5031 .fc_protocol = rtm->rtm_protocol,
5032 .fc_type = rtm->rtm_type,
5033
5034 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
5035 .fc_nlinfo.nlh = nlh,
5036 .fc_nlinfo.nl_net = sock_net(skb->sk),
5037 };
5038
5039 if (rtm->rtm_type == RTN_UNREACHABLE ||
5040 rtm->rtm_type == RTN_BLACKHOLE ||
5041 rtm->rtm_type == RTN_PROHIBIT ||
5042 rtm->rtm_type == RTN_THROW)
5043 cfg->fc_flags |= RTF_REJECT;
5044
5045 if (rtm->rtm_type == RTN_LOCAL)
5046 cfg->fc_flags |= RTF_LOCAL;
5047
5048 if (rtm->rtm_flags & RTM_F_CLONED)
5049 cfg->fc_flags |= RTF_CACHE;
5050
5051 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5052
5053 if (tb[RTA_NH_ID]) {
5054 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
5055 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5056 NL_SET_ERR_MSG(extack,
5057 "Nexthop specification and nexthop id are mutually exclusive");
5058 goto errout;
5059 }
5060 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5061 }
5062
5063 if (tb[RTA_GATEWAY]) {
5064 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5065 cfg->fc_flags |= RTF_GATEWAY;
5066 }
5067 if (tb[RTA_VIA]) {
5068 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5069 goto errout;
5070 }
5071
5072 if (tb[RTA_DST]) {
5073 int plen = (rtm->rtm_dst_len + 7) >> 3;
5074
5075 if (nla_len(tb[RTA_DST]) < plen)
5076 goto errout;
5077
5078 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5079 }
5080
5081 if (tb[RTA_SRC]) {
5082 int plen = (rtm->rtm_src_len + 7) >> 3;
5083
5084 if (nla_len(tb[RTA_SRC]) < plen)
5085 goto errout;
5086
5087 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5088 }
5089
5090 if (tb[RTA_PREFSRC])
5091 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5092
5093 if (tb[RTA_OIF])
5094 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5095
5096 if (tb[RTA_PRIORITY])
5097 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5098
5099 if (tb[RTA_METRICS]) {
5100 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5101 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5102 }
5103
5104 if (tb[RTA_TABLE])
5105 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5106
5107 if (tb[RTA_MULTIPATH]) {
5108 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5109 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5110
5111 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
5112 cfg->fc_mp_len, extack);
5113 if (err < 0)
5114 goto errout;
5115 }
5116
5117 if (tb[RTA_PREF]) {
5118 pref = nla_get_u8(tb[RTA_PREF]);
5119 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5120 pref != ICMPV6_ROUTER_PREF_HIGH)
5121 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5122 cfg->fc_flags |= RTF_PREF(pref);
5123 }
5124
5125 if (tb[RTA_ENCAP])
5126 cfg->fc_encap = tb[RTA_ENCAP];
5127
5128 if (tb[RTA_ENCAP_TYPE]) {
5129 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5130
5131 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5132 if (err < 0)
5133 goto errout;
5134 }
5135
5136 if (tb[RTA_EXPIRES]) {
5137 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5138
5139 if (addrconf_finite_timeout(timeout)) {
5140 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5141 cfg->fc_flags |= RTF_EXPIRES;
5142 }
5143 }
5144
5145 err = 0;
5146errout:
5147 return err;
5148}
5149
5150struct rt6_nh {
5151 struct fib6_info *fib6_info;
5152 struct fib6_config r_cfg;
5153 struct list_head next;
5154};
5155
5156static int ip6_route_info_append(struct net *net,
5157 struct list_head *rt6_nh_list,
5158 struct fib6_info *rt,
5159 struct fib6_config *r_cfg)
5160{
5161 struct rt6_nh *nh;
5162 int err = -EEXIST;
5163
5164 list_for_each_entry(nh, rt6_nh_list, next) {
5165 /* check if fib6_info already exists */
5166 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5167 return err;
5168 }
5169
5170 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5171 if (!nh)
5172 return -ENOMEM;
5173 nh->fib6_info = rt;
5174 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5175 list_add_tail(&nh->next, rt6_nh_list);
5176
5177 return 0;
5178}
5179
5180static void ip6_route_mpath_notify(struct fib6_info *rt,
5181 struct fib6_info *rt_last,
5182 struct nl_info *info,
5183 __u16 nlflags)
5184{
5185 /* if this is an APPEND route, then rt points to the first route
5186	 * inserted and rt_last points to the last route inserted. Userspace
5187	 * wants a consistent dump of the route which starts at the first
5188	 * nexthop. Since sibling routes are always added at the end of
5189	 * the list, find the first sibling of the last route appended.
5190 */
5191 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5192 rt = list_first_entry(&rt_last->fib6_siblings,
5193 struct fib6_info,
5194 fib6_siblings);
5195 }
5196
5197 if (rt)
5198 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5199}
5200
5201static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5202{
5203 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5204 bool should_notify = false;
5205 struct fib6_info *leaf;
5206 struct fib6_node *fn;
5207
5208 rcu_read_lock();
5209 fn = rcu_dereference(rt->fib6_node);
5210 if (!fn)
5211 goto out;
5212
5213 leaf = rcu_dereference(fn->leaf);
5214 if (!leaf)
5215 goto out;
5216
5217 if (rt == leaf ||
5218 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5219 rt6_qualify_for_ecmp(leaf)))
5220 should_notify = true;
5221out:
5222 rcu_read_unlock();
5223
5224 return should_notify;
5225}
5226
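/* Add a multipath route: build one fib6_info per RTA_MULTIPATH
 * nexthop, insert them as siblings of each other and send a single
 * notification for the whole route. Routes that were already
 * inserted are deleted again on failure. This is the path taken by
 * e.g.
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 \
 *		nexthop via fe80::2 dev eth1
 */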
5227static int ip6_route_multipath_add(struct fib6_config *cfg,
5228 struct netlink_ext_ack *extack)
5229{
5230 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5231 struct nl_info *info = &cfg->fc_nlinfo;
5232 struct fib6_config r_cfg;
5233 struct rtnexthop *rtnh;
5234 struct fib6_info *rt;
5235 struct rt6_nh *err_nh;
5236 struct rt6_nh *nh, *nh_safe;
5237 __u16 nlflags;
5238 int remaining;
5239 int attrlen;
5240 int err = 1;
5241 int nhn = 0;
5242 int replace = (cfg->fc_nlinfo.nlh &&
5243 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5244 LIST_HEAD(rt6_nh_list);
5245
5246 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5247 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5248 nlflags |= NLM_F_APPEND;
5249
5250 remaining = cfg->fc_mp_len;
5251 rtnh = (struct rtnexthop *)cfg->fc_mp;
5252
5253 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5254 * fib6_info structs per nexthop
5255 */
5256 while (rtnh_ok(rtnh, remaining)) {
5257 memcpy(&r_cfg, cfg, sizeof(*cfg));
5258 if (rtnh->rtnh_ifindex)
5259 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5260
5261 attrlen = rtnh_attrlen(rtnh);
5262 if (attrlen > 0) {
5263 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5264
5265 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5266 if (nla) {
5267 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5268 r_cfg.fc_flags |= RTF_GATEWAY;
5269 }
5270 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5271 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5272 if (nla)
5273 r_cfg.fc_encap_type = nla_get_u16(nla);
5274 }
5275
5276 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5277 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5278 if (IS_ERR(rt)) {
5279 err = PTR_ERR(rt);
5280 rt = NULL;
5281 goto cleanup;
5282 }
5283 if (!rt6_qualify_for_ecmp(rt)) {
5284 err = -EINVAL;
5285 NL_SET_ERR_MSG(extack,
5286				       "Device-only routes cannot be added for IPv6 using the multipath API.");
5287 fib6_info_release(rt);
5288 goto cleanup;
5289 }
5290
5291 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5292
5293 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5294 rt, &r_cfg);
5295 if (err) {
5296 fib6_info_release(rt);
5297 goto cleanup;
5298 }
5299
5300 rtnh = rtnh_next(rtnh, &remaining);
5301 }
5302
5303 if (list_empty(&rt6_nh_list)) {
5304 NL_SET_ERR_MSG(extack,
5305 "Invalid nexthop configuration - no valid nexthops");
5306 return -EINVAL;
5307 }
5308
5309	/* For add and replace, send one notification with all nexthops.
5310	 * Skip the notification in fib6_add_rt2node and send one with
5311	 * the full route when done.
5312 */
5313 info->skip_notify = 1;
5314
5315 /* For add and replace, send one notification with all nexthops. For
5316 * append, send one notification with all appended nexthops.
5317 */
5318 info->skip_notify_kernel = 1;
5319
5320 err_nh = NULL;
5321 list_for_each_entry(nh, &rt6_nh_list, next) {
5322 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5323 fib6_info_release(nh->fib6_info);
5324
5325 if (!err) {
5326 /* save reference to last route successfully inserted */
5327 rt_last = nh->fib6_info;
5328
5329 /* save reference to first route for notification */
5330 if (!rt_notif)
5331 rt_notif = nh->fib6_info;
5332 }
5333
5334		/* nh->fib6_info is used or freed at this point, reset to NULL */
5335 nh->fib6_info = NULL;
5336 if (err) {
5337 if (replace && nhn)
5338 NL_SET_ERR_MSG_MOD(extack,
5339 "multipath route replace failed (check consistency of installed routes)");
5340 err_nh = nh;
5341 goto add_errout;
5342 }
5343
5344 /* Because each route is added like a single route we remove
5345 * these flags after the first nexthop: if there is a collision,
5346 * we have already failed to add the first nexthop:
5347 * fib6_add_rt2node() has rejected it; when replacing, old
5348 * nexthops have been replaced by first new, the rest should
5349 * be added to it.
5350 */
5351 if (cfg->fc_nlinfo.nlh) {
5352 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5353 NLM_F_REPLACE);
5354 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5355 }
5356 nhn++;
5357 }
5358
5359 /* An in-kernel notification should only be sent in case the new
5360 * multipath route is added as the first route in the node, or if
5361 * it was appended to it. We pass 'rt_notif' since it is the first
5362 * sibling and might allow us to skip some checks in the replace case.
5363 */
5364 if (ip6_route_mpath_should_notify(rt_notif)) {
5365 enum fib_event_type fib_event;
5366
5367 if (rt_notif->fib6_nsiblings != nhn - 1)
5368 fib_event = FIB_EVENT_ENTRY_APPEND;
5369 else
5370 fib_event = FIB_EVENT_ENTRY_REPLACE;
5371
5372 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5373 fib_event, rt_notif,
5374 nhn - 1, extack);
5375 if (err) {
5376 /* Delete all the siblings that were just added */
5377 err_nh = NULL;
5378 goto add_errout;
5379 }
5380 }
5381
5382 /* success ... tell user about new route */
5383 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5384 goto cleanup;
5385
5386add_errout:
5387 /* send notification for routes that were added so that
5388 * the delete notifications sent by ip6_route_del are
5389 * coherent
5390 */
5391 if (rt_notif)
5392 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5393
5394 /* Delete routes that were already added */
5395 list_for_each_entry(nh, &rt6_nh_list, next) {
5396 if (err_nh == nh)
5397 break;
5398 ip6_route_del(&nh->r_cfg, extack);
5399 }
5400
5401cleanup:
5402 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5403 if (nh->fib6_info)
5404 fib6_info_release(nh->fib6_info);
5405 list_del(&nh->next);
5406 kfree(nh);
5407 }
5408
5409 return err;
5410}
5411
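/* Delete a multipath route by walking RTA_MULTIPATH and deleting
 * each nexthop as an individual route; the last error seen, if any,
 * is returned.
 */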
5412static int ip6_route_multipath_del(struct fib6_config *cfg,
5413 struct netlink_ext_ack *extack)
5414{
5415 struct fib6_config r_cfg;
5416 struct rtnexthop *rtnh;
5417 int last_err = 0;
5418 int remaining;
5419 int attrlen;
5420 int err;
5421
5422 remaining = cfg->fc_mp_len;
5423 rtnh = (struct rtnexthop *)cfg->fc_mp;
5424
5425 /* Parse a Multipath Entry */
5426 while (rtnh_ok(rtnh, remaining)) {
5427 memcpy(&r_cfg, cfg, sizeof(*cfg));
5428 if (rtnh->rtnh_ifindex)
5429 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5430
5431 attrlen = rtnh_attrlen(rtnh);
5432 if (attrlen > 0) {
5433 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5434
5435 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5436 if (nla) {
5437 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
5438 r_cfg.fc_flags |= RTF_GATEWAY;
5439 }
5440 }
5441 err = ip6_route_del(&r_cfg, extack);
5442 if (err)
5443 last_err = err;
5444
5445 rtnh = rtnh_next(rtnh, &remaining);
5446 }
5447
5448 return last_err;
5449}
5450
5451static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5452 struct netlink_ext_ack *extack)
5453{
5454 struct fib6_config cfg;
5455 int err;
5456
5457 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5458 if (err < 0)
5459 return err;
5460
5461 if (cfg.fc_nh_id &&
5462 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5463 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5464 return -EINVAL;
5465 }
5466
5467 if (cfg.fc_mp)
5468 return ip6_route_multipath_del(&cfg, extack);
5469 else {
5470 cfg.fc_delete_all_nh = 1;
5471 return ip6_route_del(&cfg, extack);
5472 }
5473}
5474
5475static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5476 struct netlink_ext_ack *extack)
5477{
5478 struct fib6_config cfg;
5479 int err;
5480
5481 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5482 if (err < 0)
5483 return err;
5484
5485 if (cfg.fc_metric == 0)
5486 cfg.fc_metric = IP6_RT_PRIO_USER;
5487
5488 if (cfg.fc_mp)
5489 return ip6_route_multipath_add(&cfg, extack);
5490 else
5491 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5492}
5493
5494/* add the overhead of this fib6_nh to nexthop_len */
5495static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5496{
5497 int *nexthop_len = arg;
5498
5499 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5500 + NLA_ALIGN(sizeof(struct rtnexthop))
5501 + nla_total_size(16); /* RTA_GATEWAY */
5502
5503 if (nh->fib_nh_lws) {
5504		/* RTA_ENCAP */
5505		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5506		/* RTA_ENCAP_TYPE */
5507		*nexthop_len += nla_total_size(2);
5508 }
5509
5510 return 0;
5511}
5512
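/* Estimate the netlink message size needed to dump @f6i, including
 * nexthop, metric and cacheinfo attributes. Must be an upper bound:
 * rt6_fill_node() returning -EMSGSIZE against a buffer of this size
 * is treated as a bug.
 */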
5513static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5514{
5515 int nexthop_len;
5516
5517 if (f6i->nh) {
5518 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5519 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5520 &nexthop_len);
5521 } else {
5522 struct fib6_nh *nh = f6i->fib6_nh;
5523
5524 nexthop_len = 0;
5525 if (f6i->fib6_nsiblings) {
5526 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5527 + NLA_ALIGN(sizeof(struct rtnexthop))
5528 + nla_total_size(16) /* RTA_GATEWAY */
5529 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5530
5531 nexthop_len *= f6i->fib6_nsiblings;
5532 }
5533 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5534 }
5535
5536 return NLMSG_ALIGN(sizeof(struct rtmsg))
5537 + nla_total_size(16) /* RTA_SRC */
5538 + nla_total_size(16) /* RTA_DST */
5539 + nla_total_size(16) /* RTA_GATEWAY */
5540 + nla_total_size(16) /* RTA_PREFSRC */
5541 + nla_total_size(4) /* RTA_TABLE */
5542 + nla_total_size(4) /* RTA_IIF */
5543 + nla_total_size(4) /* RTA_OIF */
5544 + nla_total_size(4) /* RTA_PRIORITY */
5545 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5546 + nla_total_size(sizeof(struct rta_cacheinfo))
5547 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5548 + nla_total_size(1) /* RTA_PREF */
5549 + nexthop_len;
5550}
5551
5552static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5553 unsigned char *flags)
5554{
5555 if (nexthop_is_multipath(nh)) {
5556 struct nlattr *mp;
5557
5558 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5559 if (!mp)
5560 goto nla_put_failure;
5561
5562 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5563 goto nla_put_failure;
5564
5565 nla_nest_end(skb, mp);
5566 } else {
5567 struct fib6_nh *fib6_nh;
5568
5569 fib6_nh = nexthop_fib6_nh(nh);
5570 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5571 flags, false) < 0)
5572 goto nla_put_failure;
5573 }
5574
5575 return 0;
5576
5577nla_put_failure:
5578 return -EMSGSIZE;
5579}
5580
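/* Fill a netlink route message for the fib entry @rt, or for the
 * cached dst entry @dst if given. Returns 0 on success, -EMSGSIZE
 * if the message does not fit into @skb.
 */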
5581static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5582 struct fib6_info *rt, struct dst_entry *dst,
5583 struct in6_addr *dest, struct in6_addr *src,
5584 int iif, int type, u32 portid, u32 seq,
5585 unsigned int flags)
5586{
5587 struct rt6_info *rt6 = (struct rt6_info *)dst;
5588 struct rt6key *rt6_dst, *rt6_src;
5589 u32 *pmetrics, table, rt6_flags;
5590 unsigned char nh_flags = 0;
5591 struct nlmsghdr *nlh;
5592 struct rtmsg *rtm;
5593 long expires = 0;
5594
5595 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5596 if (!nlh)
5597 return -EMSGSIZE;
5598
5599 if (rt6) {
5600 rt6_dst = &rt6->rt6i_dst;
5601 rt6_src = &rt6->rt6i_src;
5602 rt6_flags = rt6->rt6i_flags;
5603 } else {
5604 rt6_dst = &rt->fib6_dst;
5605 rt6_src = &rt->fib6_src;
5606 rt6_flags = rt->fib6_flags;
5607 }
5608
5609 rtm = nlmsg_data(nlh);
5610 rtm->rtm_family = AF_INET6;
5611 rtm->rtm_dst_len = rt6_dst->plen;
5612 rtm->rtm_src_len = rt6_src->plen;
5613 rtm->rtm_tos = 0;
5614 if (rt->fib6_table)
5615 table = rt->fib6_table->tb6_id;
5616 else
5617 table = RT6_TABLE_UNSPEC;
5618 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5619 if (nla_put_u32(skb, RTA_TABLE, table))
5620 goto nla_put_failure;
5621
5622 rtm->rtm_type = rt->fib6_type;
5623 rtm->rtm_flags = 0;
5624 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5625 rtm->rtm_protocol = rt->fib6_protocol;
5626
5627 if (rt6_flags & RTF_CACHE)
5628 rtm->rtm_flags |= RTM_F_CLONED;
5629
5630 if (dest) {
5631 if (nla_put_in6_addr(skb, RTA_DST, dest))
5632 goto nla_put_failure;
5633 rtm->rtm_dst_len = 128;
5634 } else if (rtm->rtm_dst_len)
5635 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5636 goto nla_put_failure;
5637#ifdef CONFIG_IPV6_SUBTREES
5638 if (src) {
5639 if (nla_put_in6_addr(skb, RTA_SRC, src))
5640 goto nla_put_failure;
5641 rtm->rtm_src_len = 128;
5642 } else if (rtm->rtm_src_len &&
5643 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5644 goto nla_put_failure;
5645#endif
5646 if (iif) {
5647#ifdef CONFIG_IPV6_MROUTE
5648 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5649 int err = ip6mr_get_route(net, skb, rtm, portid);
5650
5651 if (err == 0)
5652 return 0;
5653 if (err < 0)
5654 goto nla_put_failure;
5655 } else
5656#endif
5657 if (nla_put_u32(skb, RTA_IIF, iif))
5658 goto nla_put_failure;
5659 } else if (dest) {
5660 struct in6_addr saddr_buf;
5661 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5662 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5663 goto nla_put_failure;
5664 }
5665
5666 if (rt->fib6_prefsrc.plen) {
5667 struct in6_addr saddr_buf;
5668 saddr_buf = rt->fib6_prefsrc.addr;
5669 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5670 goto nla_put_failure;
5671 }
5672
5673 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5674 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5675 goto nla_put_failure;
5676
5677 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5678 goto nla_put_failure;
5679
5680 /* For multipath routes, walk the siblings list and add
5681 * each as a nexthop within RTA_MULTIPATH.
5682 */
5683 if (rt6) {
5684 if (rt6_flags & RTF_GATEWAY &&
5685 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5686 goto nla_put_failure;
5687
5688 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5689 goto nla_put_failure;
5690
5691 if (dst->lwtstate &&
5692 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5693 goto nla_put_failure;
5694 } else if (rt->fib6_nsiblings) {
5695 struct fib6_info *sibling, *next_sibling;
5696 struct nlattr *mp;
5697
5698 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5699 if (!mp)
5700 goto nla_put_failure;
5701
5702 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5703 rt->fib6_nh->fib_nh_weight, AF_INET6,
5704 0) < 0)
5705 goto nla_put_failure;
5706
5707 list_for_each_entry_safe(sibling, next_sibling,
5708 &rt->fib6_siblings, fib6_siblings) {
5709 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5710 sibling->fib6_nh->fib_nh_weight,
5711 AF_INET6, 0) < 0)
5712 goto nla_put_failure;
5713 }
5714
5715 nla_nest_end(skb, mp);
5716 } else if (rt->nh) {
5717 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5718 goto nla_put_failure;
5719
5720 if (nexthop_is_blackhole(rt->nh))
5721 rtm->rtm_type = RTN_BLACKHOLE;
5722
5723 if (net->ipv4.sysctl_nexthop_compat_mode &&
5724 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5725 goto nla_put_failure;
5726
5727 rtm->rtm_flags |= nh_flags;
5728 } else {
5729 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5730 &nh_flags, false) < 0)
5731 goto nla_put_failure;
5732
5733 rtm->rtm_flags |= nh_flags;
5734 }
5735
5736 if (rt6_flags & RTF_EXPIRES) {
5737 expires = dst ? dst->expires : rt->expires;
5738 expires -= jiffies;
5739 }
5740
5741 if (!dst) {
5742 if (rt->offload)
5743 rtm->rtm_flags |= RTM_F_OFFLOAD;
5744 if (rt->trap)
5745 rtm->rtm_flags |= RTM_F_TRAP;
5746 if (rt->offload_failed)
5747 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5748 }
5749
5750 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5751 goto nla_put_failure;
5752
5753 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5754 goto nla_put_failure;
5755
5757 nlmsg_end(skb, nlh);
5758 return 0;
5759
5760nla_put_failure:
5761 nlmsg_cancel(skb, nlh);
5762 return -EMSGSIZE;
5763}
5764
5765static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5766{
5767 const struct net_device *dev = arg;
5768
5769 if (nh->fib_nh_dev == dev)
5770 return 1;
5771
5772 return 0;
5773}
5774
5775static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5776 const struct net_device *dev)
5777{
5778 if (f6i->nh) {
5779 struct net_device *_dev = (struct net_device *)dev;
5780
5781 return !!nexthop_for_each_fib6_nh(f6i->nh,
5782 fib6_info_nh_uses_dev,
5783 _dev);
5784 }
5785
5786 if (f6i->fib6_nh->fib_nh_dev == dev)
5787 return true;
5788
5789 if (f6i->fib6_nsiblings) {
5790 struct fib6_info *sibling, *next_sibling;
5791
5792 list_for_each_entry_safe(sibling, next_sibling,
5793 &f6i->fib6_siblings, fib6_siblings) {
5794 if (sibling->fib6_nh->fib_nh_dev == dev)
5795 return true;
5796 }
5797 }
5798
5799 return false;
5800}
5801
5802struct fib6_nh_exception_dump_walker {
5803 struct rt6_rtnl_dump_arg *dump;
5804 struct fib6_info *rt;
5805 unsigned int flags;
5806 unsigned int skip;
5807 unsigned int count;
5808};
5809
5810static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5811{
5812 struct fib6_nh_exception_dump_walker *w = arg;
5813 struct rt6_rtnl_dump_arg *dump = w->dump;
5814 struct rt6_exception_bucket *bucket;
5815 struct rt6_exception *rt6_ex;
5816 int i, err;
5817
5818 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5819 if (!bucket)
5820 return 0;
5821
5822 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5823 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5824 if (w->skip) {
5825 w->skip--;
5826 continue;
5827 }
5828
5829 /* Expiration of entries doesn't bump sernum, insertion
5830 * does. Removal is triggered by insertion, so we can
5831 * rely on the fact that if entries change between two
5832 * partial dumps, this node is scanned again completely,
5833 * see rt6_insert_exception() and fib6_dump_table().
5834 *
5835 * Count expired entries we go through as handled
5836 * entries that we'll skip next time, in case of partial
5837 * node dump. Otherwise, if entries expire meanwhile,
5838 * we'll skip the wrong amount.
5839 */
5840 if (rt6_check_expired(rt6_ex->rt6i)) {
5841 w->count++;
5842 continue;
5843 }
5844
5845 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5846 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5847 RTM_NEWROUTE,
5848 NETLINK_CB(dump->cb->skb).portid,
5849 dump->cb->nlh->nlmsg_seq, w->flags);
5850 if (err)
5851 return err;
5852
5853 w->count++;
5854 }
5855 bucket++;
5856 }
5857
5858 return 0;
5859}
5860
5861/* Return -1 if done with node, number of handled routes on partial dump */
5862int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5863{
5864 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5865 struct fib_dump_filter *filter = &arg->filter;
5866 unsigned int flags = NLM_F_MULTI;
5867 struct net *net = arg->net;
5868 int count = 0;
5869
5870 if (rt == net->ipv6.fib6_null_entry)
5871 return -1;
5872
5873 if ((filter->flags & RTM_F_PREFIX) &&
5874 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5875 /* success since this is not a prefix route */
5876 return -1;
5877 }
5878 if (filter->filter_set &&
5879 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5880 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5881 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5882 return -1;
5883 }
5884
5885 if (filter->filter_set ||
5886 !filter->dump_routes || !filter->dump_exceptions) {
5887 flags |= NLM_F_DUMP_FILTERED;
5888 }
5889
5890 if (filter->dump_routes) {
5891 if (skip) {
5892 skip--;
5893 } else {
5894 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5895 0, RTM_NEWROUTE,
5896 NETLINK_CB(arg->cb->skb).portid,
5897 arg->cb->nlh->nlmsg_seq, flags)) {
5898 return 0;
5899 }
5900 count++;
5901 }
5902 }
5903
5904 if (filter->dump_exceptions) {
5905 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5906 .rt = rt,
5907 .flags = flags,
5908 .skip = skip,
5909 .count = 0 };
5910 int err;
5911
5912 rcu_read_lock();
5913 if (rt->nh) {
5914 err = nexthop_for_each_fib6_nh(rt->nh,
5915 rt6_nh_dump_exceptions,
5916 &w);
5917 } else {
5918 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5919 }
5920 rcu_read_unlock();
5921
5922 if (err)
5923 return count += w.count;
5924 }
5925
5926 return -1;
5927}
5928
5929static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5930 const struct nlmsghdr *nlh,
5931 struct nlattr **tb,
5932 struct netlink_ext_ack *extack)
5933{
5934 struct rtmsg *rtm;
5935 int i, err;
5936
5937 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5938 NL_SET_ERR_MSG_MOD(extack,
5939 "Invalid header for get route request");
5940 return -EINVAL;
5941 }
5942
5943 if (!netlink_strict_get_check(skb))
5944 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5945 rtm_ipv6_policy, extack);
5946
5947 rtm = nlmsg_data(nlh);
5948 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5949 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5950 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5951 rtm->rtm_type) {
5952 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5953 return -EINVAL;
5954 }
5955 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5956 NL_SET_ERR_MSG_MOD(extack,
5957 "Invalid flags for get route request");
5958 return -EINVAL;
5959 }
5960
5961 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5962 rtm_ipv6_policy, extack);
5963 if (err)
5964 return err;
5965
5966 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5967 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5968 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5969 return -EINVAL;
5970 }
5971
5972 for (i = 0; i <= RTA_MAX; i++) {
5973 if (!tb[i])
5974 continue;
5975
5976 switch (i) {
5977 case RTA_SRC:
5978 case RTA_DST:
5979 case RTA_IIF:
5980 case RTA_OIF:
5981 case RTA_MARK:
5982 case RTA_UID:
5983 case RTA_SPORT:
5984 case RTA_DPORT:
5985 case RTA_IP_PROTO:
5986 break;
5987 default:
5988 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5989 return -EINVAL;
5990 }
5991 }
5992
5993 return 0;
5994}
5995
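/* RTM_GETROUTE handler: resolve the flow described by the request
 * and answer with a single RTM_NEWROUTE message, reporting the fib
 * entry itself rather than the cached dst when RTM_F_FIB_MATCH is
 * set.
 */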
5996static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5997 struct netlink_ext_ack *extack)
5998{
5999 struct net *net = sock_net(in_skb->sk);
6000 struct nlattr *tb[RTA_MAX+1];
6001 int err, iif = 0, oif = 0;
6002 struct fib6_info *from;
6003 struct dst_entry *dst;
6004 struct rt6_info *rt;
6005 struct sk_buff *skb;
6006 struct rtmsg *rtm;
6007 struct flowi6 fl6 = {};
6008 bool fibmatch;
6009
6010 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6011 if (err < 0)
6012 goto errout;
6013
6014 err = -EINVAL;
6015 rtm = nlmsg_data(nlh);
6016 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
6017 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6018
6019 if (tb[RTA_SRC]) {
6020 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6021 goto errout;
6022
6023 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6024 }
6025
6026 if (tb[RTA_DST]) {
6027 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6028 goto errout;
6029
6030 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6031 }
6032
6033 if (tb[RTA_IIF])
6034 iif = nla_get_u32(tb[RTA_IIF]);
6035
6036 if (tb[RTA_OIF])
6037 oif = nla_get_u32(tb[RTA_OIF]);
6038
6039 if (tb[RTA_MARK])
6040 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6041
6042 if (tb[RTA_UID])
6043 fl6.flowi6_uid = make_kuid(current_user_ns(),
6044 nla_get_u32(tb[RTA_UID]));
6045 else
6046 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6047
6048 if (tb[RTA_SPORT])
6049 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6050
6051 if (tb[RTA_DPORT])
6052 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6053
6054 if (tb[RTA_IP_PROTO]) {
6055 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6056 &fl6.flowi6_proto, AF_INET6,
6057 extack);
6058 if (err)
6059 goto errout;
6060 }
6061
6062 if (iif) {
6063 struct net_device *dev;
6064 int flags = 0;
6065
6066 rcu_read_lock();
6067
6068 dev = dev_get_by_index_rcu(net, iif);
6069 if (!dev) {
6070 rcu_read_unlock();
6071 err = -ENODEV;
6072 goto errout;
6073 }
6074
6075 fl6.flowi6_iif = iif;
6076
6077 if (!ipv6_addr_any(&fl6.saddr))
6078 flags |= RT6_LOOKUP_F_HAS_SADDR;
6079
6080 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6081
6082 rcu_read_unlock();
6083 } else {
6084 fl6.flowi6_oif = oif;
6085
6086 dst = ip6_route_output(net, NULL, &fl6);
6087 }
6088
6090 rt = container_of(dst, struct rt6_info, dst);
6091 if (rt->dst.error) {
6092 err = rt->dst.error;
6093 ip6_rt_put(rt);
6094 goto errout;
6095 }
6096
6097 if (rt == net->ipv6.ip6_null_entry) {
6098 err = rt->dst.error;
6099 ip6_rt_put(rt);
6100 goto errout;
6101 }
6102
6103 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6104 if (!skb) {
6105 ip6_rt_put(rt);
6106 err = -ENOBUFS;
6107 goto errout;
6108 }
6109
6110 skb_dst_set(skb, &rt->dst);
6111
6112 rcu_read_lock();
6113 from = rcu_dereference(rt->from);
6114 if (from) {
6115 if (fibmatch)
6116 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6117 iif, RTM_NEWROUTE,
6118 NETLINK_CB(in_skb).portid,
6119 nlh->nlmsg_seq, 0);
6120 else
6121 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6122 &fl6.saddr, iif, RTM_NEWROUTE,
6123 NETLINK_CB(in_skb).portid,
6124 nlh->nlmsg_seq, 0);
6125 } else {
6126 err = -ENETUNREACH;
6127 }
6128 rcu_read_unlock();
6129
6130 if (err < 0) {
6131 kfree_skb(skb);
6132 goto errout;
6133 }
6134
6135 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6136errout:
6137 return err;
6138}
6139
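/* inet6_rt_notify() - announce a route change to netlink listeners.
 *
 * Builds an RTM_NEWROUTE/RTM_DELROUTE message (per @event) for @rt and
 * multicasts it to the RTNLGRP_IPV6_ROUTE group.  If the message cannot
 * be allocated or filled, the error is reported to subscribers through
 * rtnl_set_sk_err() instead.
 */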
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

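/* fib6_rt_update() - announce an in-place update of an existing route.
 *
 * Same notification path as inet6_rt_notify(), but the message is
 * always RTM_NEWROUTE with NLM_F_REPLACE set, telling listeners that
 * an entry was replaced rather than added or deleted.
 */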
void fib6_rt_update(struct net *net, struct fib6_info *rt,
		    struct nl_info *info)
{
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

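/* fib6_info_hw_flags_set() - update hardware offload state of a route.
 *
 * Records the new offload/trap/offload_failed flags on @f6i and,
 * depending on the fib_notify_on_flag_change sysctl, notifies
 * userspace of the change:
 *   0 - never notify on flag changes
 *   1 - notify on any flag change
 *   2 - notify only when offload_failed changes
 */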
void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
			    bool offload, bool trap, bool offload_failed)
{
	struct sk_buff *skb;
	int err;

	if (f6i->offload == offload && f6i->trap == trap &&
	    f6i->offload_failed == offload_failed)
		return;

	f6i->offload = offload;
	f6i->trap = trap;

	/* 2 means send notifications only if offload_failed was changed. */
	if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
	    f6i->offload_failed == offload_failed)
		return;

	f6i->offload_failed = offload_failed;

	if (!rcu_access_pointer(f6i->fib6_node))
		/* The route was removed from the tree, do not send
		 * notification.
		 */
		return;

	if (!net->ipv6.sysctl.fib_notify_on_flag_change)
		return;

	skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
			    0, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
EXPORT_SYMBOL(fib6_info_hw_flags_set);

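/* The per-namespace null entry (and, with CONFIG_IPV6_MULTIPLE_TABLES,
 * the prohibit and blackhole entries) has no natural device, so bind
 * it to the loopback device when loopback registers and drop the
 * inet6_dev references again when loopback unregisters.
 */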
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER can be fired multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

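/* Handler for the write-only /proc/sys/net/ipv6/route/flush file.
 * Writing an integer to it triggers an immediate pass of the FIB6
 * garbage collector, e.g. (as root, with procfs mounted in the usual
 * place):
 *
 *	echo -1 > /proc/sys/net/ipv6/route/flush
 *
 * Note that "delay" is sampled before proc_dointvec() stores the newly
 * written value, so the GC pass sees the previously stored flush_delay.
 */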
static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname = "flush",
		.data = &init_net.ipv6.sysctl.flush_delay,
		.maxlen = sizeof(int),
		.mode = 0200,
		.proc_handler = ipv6_sysctl_rtcache_flush
	},
	{
		.procname = "gc_thresh",
		.data = &ip6_dst_ops_template.gc_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "max_size",
		.data = &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "gc_min_interval",
		.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_timeout",
		.data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_interval",
		.data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_elasticity",
		.data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "mtu_expires",
		.data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "min_adv_mss",
		.data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "gc_min_interval_ms",
		.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_ms_jiffies,
	},
	{
		.procname = "skip_notify_on_dev_down",
		.data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{ }
};

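/* Clone the sysctl template for a new namespace and repoint each entry
 * at the per-netns data.  The table[i] indices below must stay in sync
 * with the ordering of ipv6_route_table_template above.
 */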
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif

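/* Per-namespace setup: install the dst_ops, allocate the special null
 * (and, with CONFIG_IPV6_MULTIPLE_TABLES, prohibit and blackhole)
 * entries from their templates and seed the route sysctls with their
 * defaults.  Failure unwinds through the labels at the bottom in
 * reverse order of allocation.
 */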
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
#ifdef CONFIG_IPV6_SUBTREES
	net->ipv6.fib6_routes_require_src = 0;
#endif
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

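/* Per-namespace inet_peer base for IPv6: long-lived per-peer state,
 * used e.g. for ICMPv6 rate limiting.
 */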
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};

void __init ip6_route_init_special_entries(void)
{
	/* The loopback device is registered before this code runs, so the
	 * special route entries never took a reference on it; take the
	 * reference manually for init_net.
	 */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}

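/* When IPv6 is built in, expose the FIB as an "ipv6_route" bpf_iter
 * target: iterator programs receive one struct fib6_info pointer per
 * entry, backed by the same seq_ops that drive /proc/net/ipv6_route.
 */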
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)

BTF_ID_LIST(btf_fib6_info_id)
BTF_ID(struct, fib6_info)

static const struct bpf_iter_seq_info ipv6_route_seq_info = {
	.seq_ops = &ipv6_route_seq_ops,
	.init_seq_private = bpf_iter_init_seq_net,
	.fini_seq_private = bpf_iter_fini_seq_net,
	.seq_priv_size = sizeof(struct ipv6_route_iter),
};

static struct bpf_iter_reg ipv6_route_reg_info = {
	.target = "ipv6_route",
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__ipv6_route, rt),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &ipv6_route_seq_info,
};

static int __init bpf_iter_register(void)
{
	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
	return bpf_iter_reg_target(&ipv6_route_reg_info);
}

static void bpf_iter_unregister(void)
{
	bpf_iter_unreg_target(&ipv6_route_reg_info);
}
#endif
#endif

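/* Subsystem init.  The registration order below matters: each step may
 * depend on the ones before it, and every failure unwinds through the
 * goto chain at the bottom in reverse order of setup.
 */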
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	ret = bpf_iter_register();
	if (ret)
		goto out_register_late_subsys;
#endif
#endif

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

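/* Tear down everything ip6_route_init() set up, in reverse order. */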
void ip6_route_cleanup(void)
{
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_unregister();
#endif
#endif
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}
1/*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27#define pr_fmt(fmt) "IPv6: " fmt
28
29#include <linux/capability.h>
30#include <linux/errno.h>
31#include <linux/export.h>
32#include <linux/types.h>
33#include <linux/times.h>
34#include <linux/socket.h>
35#include <linux/sockios.h>
36#include <linux/net.h>
37#include <linux/route.h>
38#include <linux/netdevice.h>
39#include <linux/in6.h>
40#include <linux/mroute6.h>
41#include <linux/init.h>
42#include <linux/if_arp.h>
43#include <linux/proc_fs.h>
44#include <linux/seq_file.h>
45#include <linux/nsproxy.h>
46#include <linux/slab.h>
47#include <net/net_namespace.h>
48#include <net/snmp.h>
49#include <net/ipv6.h>
50#include <net/ip6_fib.h>
51#include <net/ip6_route.h>
52#include <net/ndisc.h>
53#include <net/addrconf.h>
54#include <net/tcp.h>
55#include <linux/rtnetlink.h>
56#include <net/dst.h>
57#include <net/xfrm.h>
58#include <net/netevent.h>
59#include <net/netlink.h>
60#include <net/nexthop.h>
61
62#include <asm/uaccess.h>
63
64#ifdef CONFIG_SYSCTL
65#include <linux/sysctl.h>
66#endif
67
68enum rt6_nud_state {
69 RT6_NUD_FAIL_HARD = -3,
70 RT6_NUD_FAIL_PROBE = -2,
71 RT6_NUD_FAIL_DO_RR = -1,
72 RT6_NUD_SUCCEED = 1
73};
74
75static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
76 const struct in6_addr *dest);
77static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
78static unsigned int ip6_default_advmss(const struct dst_entry *dst);
79static unsigned int ip6_mtu(const struct dst_entry *dst);
80static struct dst_entry *ip6_negative_advice(struct dst_entry *);
81static void ip6_dst_destroy(struct dst_entry *);
82static void ip6_dst_ifdown(struct dst_entry *,
83 struct net_device *dev, int how);
84static int ip6_dst_gc(struct dst_ops *ops);
85
86static int ip6_pkt_discard(struct sk_buff *skb);
87static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
88static int ip6_pkt_prohibit(struct sk_buff *skb);
89static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
90static void ip6_link_failure(struct sk_buff *skb);
91static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
92 struct sk_buff *skb, u32 mtu);
93static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
94 struct sk_buff *skb);
95static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
96
97#ifdef CONFIG_IPV6_ROUTE_INFO
98static struct rt6_info *rt6_add_route_info(struct net *net,
99 const struct in6_addr *prefix, int prefixlen,
100 const struct in6_addr *gwaddr, int ifindex,
101 unsigned int pref);
102static struct rt6_info *rt6_get_route_info(struct net *net,
103 const struct in6_addr *prefix, int prefixlen,
104 const struct in6_addr *gwaddr, int ifindex);
105#endif
106
107static void rt6_bind_peer(struct rt6_info *rt, int create)
108{
109 struct inet_peer_base *base;
110 struct inet_peer *peer;
111
112 base = inetpeer_base_ptr(rt->_rt6i_peer);
113 if (!base)
114 return;
115
116 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
117 if (peer) {
118 if (!rt6_set_peer(rt, peer))
119 inet_putpeer(peer);
120 }
121}
122
123static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
124{
125 if (rt6_has_peer(rt))
126 return rt6_peer_ptr(rt);
127
128 rt6_bind_peer(rt, create);
129 return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
130}
131
132static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
133{
134 return __rt6_get_peer(rt, 1);
135}
136
137static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
138{
139 struct rt6_info *rt = (struct rt6_info *) dst;
140 struct inet_peer *peer;
141 u32 *p = NULL;
142
143 if (!(rt->dst.flags & DST_HOST))
144 return NULL;
145
146 peer = rt6_get_peer_create(rt);
147 if (peer) {
148 u32 *old_p = __DST_METRICS_PTR(old);
149 unsigned long prev, new;
150
151 p = peer->metrics;
152 if (inet_metrics_new(peer) ||
153 (old & DST_METRICS_FORCE_OVERWRITE))
154 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
155
156 new = (unsigned long) p;
157 prev = cmpxchg(&dst->_metrics, old, new);
158
159 if (prev != old) {
160 p = __DST_METRICS_PTR(prev);
161 if (prev & DST_METRICS_READ_ONLY)
162 p = NULL;
163 }
164 }
165 return p;
166}
167
168static inline const void *choose_neigh_daddr(struct rt6_info *rt,
169 struct sk_buff *skb,
170 const void *daddr)
171{
172 struct in6_addr *p = &rt->rt6i_gateway;
173
174 if (!ipv6_addr_any(p))
175 return (const void *) p;
176 else if (skb)
177 return &ipv6_hdr(skb)->daddr;
178 return daddr;
179}
180
181static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
182 struct sk_buff *skb,
183 const void *daddr)
184{
185 struct rt6_info *rt = (struct rt6_info *) dst;
186 struct neighbour *n;
187
188 daddr = choose_neigh_daddr(rt, skb, daddr);
189 n = __ipv6_neigh_lookup(dst->dev, daddr);
190 if (n)
191 return n;
192 return neigh_create(&nd_tbl, daddr, dst->dev);
193}
194
195static struct dst_ops ip6_dst_ops_template = {
196 .family = AF_INET6,
197 .protocol = cpu_to_be16(ETH_P_IPV6),
198 .gc = ip6_dst_gc,
199 .gc_thresh = 1024,
200 .check = ip6_dst_check,
201 .default_advmss = ip6_default_advmss,
202 .mtu = ip6_mtu,
203 .cow_metrics = ipv6_cow_metrics,
204 .destroy = ip6_dst_destroy,
205 .ifdown = ip6_dst_ifdown,
206 .negative_advice = ip6_negative_advice,
207 .link_failure = ip6_link_failure,
208 .update_pmtu = ip6_rt_update_pmtu,
209 .redirect = rt6_do_redirect,
210 .local_out = __ip6_local_out,
211 .neigh_lookup = ip6_neigh_lookup,
212};
213
214static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
215{
216 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
217
218 return mtu ? : dst->dev->mtu;
219}
220
221static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
222 struct sk_buff *skb, u32 mtu)
223{
224}
225
226static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
227 struct sk_buff *skb)
228{
229}
230
231static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
232 unsigned long old)
233{
234 return NULL;
235}
236
237static struct dst_ops ip6_dst_blackhole_ops = {
238 .family = AF_INET6,
239 .protocol = cpu_to_be16(ETH_P_IPV6),
240 .destroy = ip6_dst_destroy,
241 .check = ip6_dst_check,
242 .mtu = ip6_blackhole_mtu,
243 .default_advmss = ip6_default_advmss,
244 .update_pmtu = ip6_rt_blackhole_update_pmtu,
245 .redirect = ip6_rt_blackhole_redirect,
246 .cow_metrics = ip6_rt_blackhole_cow_metrics,
247 .neigh_lookup = ip6_neigh_lookup,
248};
249
250static const u32 ip6_template_metrics[RTAX_MAX] = {
251 [RTAX_HOPLIMIT - 1] = 0,
252};
253
254static const struct rt6_info ip6_null_entry_template = {
255 .dst = {
256 .__refcnt = ATOMIC_INIT(1),
257 .__use = 1,
258 .obsolete = DST_OBSOLETE_FORCE_CHK,
259 .error = -ENETUNREACH,
260 .input = ip6_pkt_discard,
261 .output = ip6_pkt_discard_out,
262 },
263 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
264 .rt6i_protocol = RTPROT_KERNEL,
265 .rt6i_metric = ~(u32) 0,
266 .rt6i_ref = ATOMIC_INIT(1),
267};
268
269#ifdef CONFIG_IPV6_MULTIPLE_TABLES
270
271static const struct rt6_info ip6_prohibit_entry_template = {
272 .dst = {
273 .__refcnt = ATOMIC_INIT(1),
274 .__use = 1,
275 .obsolete = DST_OBSOLETE_FORCE_CHK,
276 .error = -EACCES,
277 .input = ip6_pkt_prohibit,
278 .output = ip6_pkt_prohibit_out,
279 },
280 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
281 .rt6i_protocol = RTPROT_KERNEL,
282 .rt6i_metric = ~(u32) 0,
283 .rt6i_ref = ATOMIC_INIT(1),
284};
285
286static const struct rt6_info ip6_blk_hole_entry_template = {
287 .dst = {
288 .__refcnt = ATOMIC_INIT(1),
289 .__use = 1,
290 .obsolete = DST_OBSOLETE_FORCE_CHK,
291 .error = -EINVAL,
292 .input = dst_discard,
293 .output = dst_discard_sk,
294 },
295 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
296 .rt6i_protocol = RTPROT_KERNEL,
297 .rt6i_metric = ~(u32) 0,
298 .rt6i_ref = ATOMIC_INIT(1),
299};
300
301#endif
302
303/* allocate dst with ip6_dst_ops */
304static inline struct rt6_info *ip6_dst_alloc(struct net *net,
305 struct net_device *dev,
306 int flags,
307 struct fib6_table *table)
308{
309 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
310 0, DST_OBSOLETE_FORCE_CHK, flags);
311
312 if (rt) {
313 struct dst_entry *dst = &rt->dst;
314
315 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
316 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
317 rt->rt6i_genid = rt_genid_ipv6(net);
318 INIT_LIST_HEAD(&rt->rt6i_siblings);
319 }
320 return rt;
321}
322
323static void ip6_dst_destroy(struct dst_entry *dst)
324{
325 struct rt6_info *rt = (struct rt6_info *)dst;
326 struct inet6_dev *idev = rt->rt6i_idev;
327 struct dst_entry *from = dst->from;
328
329 if (!(rt->dst.flags & DST_HOST))
330 dst_destroy_metrics_generic(dst);
331
332 if (idev) {
333 rt->rt6i_idev = NULL;
334 in6_dev_put(idev);
335 }
336
337 dst->from = NULL;
338 dst_release(from);
339
340 if (rt6_has_peer(rt)) {
341 struct inet_peer *peer = rt6_peer_ptr(rt);
342 inet_putpeer(peer);
343 }
344}
345
346static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
347 int how)
348{
349 struct rt6_info *rt = (struct rt6_info *)dst;
350 struct inet6_dev *idev = rt->rt6i_idev;
351 struct net_device *loopback_dev =
352 dev_net(dev)->loopback_dev;
353
354 if (dev != loopback_dev) {
355 if (idev && idev->dev == dev) {
356 struct inet6_dev *loopback_idev =
357 in6_dev_get(loopback_dev);
358 if (loopback_idev) {
359 rt->rt6i_idev = loopback_idev;
360 in6_dev_put(idev);
361 }
362 }
363 }
364}
365
366static bool rt6_check_expired(const struct rt6_info *rt)
367{
368 if (rt->rt6i_flags & RTF_EXPIRES) {
369 if (time_after(jiffies, rt->dst.expires))
370 return true;
371 } else if (rt->dst.from) {
372 return rt6_check_expired((struct rt6_info *) rt->dst.from);
373 }
374 return false;
375}
376
377/* Multipath route selection:
378 * Hash based function using packet header and flowlabel.
379 * Adapted from fib_info_hashfn()
380 */
381static int rt6_info_hash_nhsfn(unsigned int candidate_count,
382 const struct flowi6 *fl6)
383{
384 unsigned int val = fl6->flowi6_proto;
385
386 val ^= ipv6_addr_hash(&fl6->daddr);
387 val ^= ipv6_addr_hash(&fl6->saddr);
388
389 /* Work only if this not encapsulated */
390 switch (fl6->flowi6_proto) {
391 case IPPROTO_UDP:
392 case IPPROTO_TCP:
393 case IPPROTO_SCTP:
394 val ^= (__force u16)fl6->fl6_sport;
395 val ^= (__force u16)fl6->fl6_dport;
396 break;
397
398 case IPPROTO_ICMPV6:
399 val ^= (__force u16)fl6->fl6_icmp_type;
400 val ^= (__force u16)fl6->fl6_icmp_code;
401 break;
402 }
403 /* RFC6438 recommands to use flowlabel */
404 val ^= (__force u32)fl6->flowlabel;
405
406 /* Perhaps, we need to tune, this function? */
407 val = val ^ (val >> 7) ^ (val >> 12);
408 return val % candidate_count;
409}
410
411static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
412 struct flowi6 *fl6, int oif,
413 int strict)
414{
415 struct rt6_info *sibling, *next_sibling;
416 int route_choosen;
417
418 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
419 /* Don't change the route, if route_choosen == 0
420 * (siblings does not include ourself)
421 */
422 if (route_choosen)
423 list_for_each_entry_safe(sibling, next_sibling,
424 &match->rt6i_siblings, rt6i_siblings) {
425 route_choosen--;
426 if (route_choosen == 0) {
427 if (rt6_score_route(sibling, oif, strict) < 0)
428 break;
429 match = sibling;
430 break;
431 }
432 }
433 return match;
434}
435
436/*
437 * Route lookup. Any table->tb6_lock is implied.
438 */
439
440static inline struct rt6_info *rt6_device_match(struct net *net,
441 struct rt6_info *rt,
442 const struct in6_addr *saddr,
443 int oif,
444 int flags)
445{
446 struct rt6_info *local = NULL;
447 struct rt6_info *sprt;
448
449 if (!oif && ipv6_addr_any(saddr))
450 goto out;
451
452 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
453 struct net_device *dev = sprt->dst.dev;
454
455 if (oif) {
456 if (dev->ifindex == oif)
457 return sprt;
458 if (dev->flags & IFF_LOOPBACK) {
459 if (!sprt->rt6i_idev ||
460 sprt->rt6i_idev->dev->ifindex != oif) {
461 if (flags & RT6_LOOKUP_F_IFACE && oif)
462 continue;
463 if (local && (!oif ||
464 local->rt6i_idev->dev->ifindex == oif))
465 continue;
466 }
467 local = sprt;
468 }
469 } else {
470 if (ipv6_chk_addr(net, saddr, dev,
471 flags & RT6_LOOKUP_F_IFACE))
472 return sprt;
473 }
474 }
475
476 if (oif) {
477 if (local)
478 return local;
479
480 if (flags & RT6_LOOKUP_F_IFACE)
481 return net->ipv6.ip6_null_entry;
482 }
483out:
484 return rt;
485}
486
487#ifdef CONFIG_IPV6_ROUTER_PREF
488struct __rt6_probe_work {
489 struct work_struct work;
490 struct in6_addr target;
491 struct net_device *dev;
492};
493
494static void rt6_probe_deferred(struct work_struct *w)
495{
496 struct in6_addr mcaddr;
497 struct __rt6_probe_work *work =
498 container_of(w, struct __rt6_probe_work, work);
499
500 addrconf_addr_solict_mult(&work->target, &mcaddr);
501 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
502 dev_put(work->dev);
503 kfree(w);
504}
505
506static void rt6_probe(struct rt6_info *rt)
507{
508 struct neighbour *neigh;
509 /*
510 * Okay, this does not seem to be appropriate
511 * for now, however, we need to check if it
512 * is really so; aka Router Reachability Probing.
513 *
514 * Router Reachability Probe MUST be rate-limited
515 * to no more than one per minute.
516 */
517 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
518 return;
519 rcu_read_lock_bh();
520 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
521 if (neigh) {
522 write_lock(&neigh->lock);
523 if (neigh->nud_state & NUD_VALID)
524 goto out;
525 }
526
527 if (!neigh ||
528 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
529 struct __rt6_probe_work *work;
530
531 work = kmalloc(sizeof(*work), GFP_ATOMIC);
532
533 if (neigh && work)
534 __neigh_set_probe_once(neigh);
535
536 if (neigh)
537 write_unlock(&neigh->lock);
538
539 if (work) {
540 INIT_WORK(&work->work, rt6_probe_deferred);
541 work->target = rt->rt6i_gateway;
542 dev_hold(rt->dst.dev);
543 work->dev = rt->dst.dev;
544 schedule_work(&work->work);
545 }
546 } else {
547out:
548 write_unlock(&neigh->lock);
549 }
550 rcu_read_unlock_bh();
551}
552#else
553static inline void rt6_probe(struct rt6_info *rt)
554{
555}
556#endif
557
558/*
559 * Default Router Selection (RFC 2461 6.3.6)
560 */
561static inline int rt6_check_dev(struct rt6_info *rt, int oif)
562{
563 struct net_device *dev = rt->dst.dev;
564 if (!oif || dev->ifindex == oif)
565 return 2;
566 if ((dev->flags & IFF_LOOPBACK) &&
567 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
568 return 1;
569 return 0;
570}
571
572static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
573{
574 struct neighbour *neigh;
575 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
576
577 if (rt->rt6i_flags & RTF_NONEXTHOP ||
578 !(rt->rt6i_flags & RTF_GATEWAY))
579 return RT6_NUD_SUCCEED;
580
581 rcu_read_lock_bh();
582 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
583 if (neigh) {
584 read_lock(&neigh->lock);
585 if (neigh->nud_state & NUD_VALID)
586 ret = RT6_NUD_SUCCEED;
587#ifdef CONFIG_IPV6_ROUTER_PREF
588 else if (!(neigh->nud_state & NUD_FAILED))
589 ret = RT6_NUD_SUCCEED;
590 else
591 ret = RT6_NUD_FAIL_PROBE;
592#endif
593 read_unlock(&neigh->lock);
594 } else {
595 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
596 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
597 }
598 rcu_read_unlock_bh();
599
600 return ret;
601}
602
603static int rt6_score_route(struct rt6_info *rt, int oif,
604 int strict)
605{
606 int m;
607
608 m = rt6_check_dev(rt, oif);
609 if (!m && (strict & RT6_LOOKUP_F_IFACE))
610 return RT6_NUD_FAIL_HARD;
611#ifdef CONFIG_IPV6_ROUTER_PREF
612 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
613#endif
614 if (strict & RT6_LOOKUP_F_REACHABLE) {
615 int n = rt6_check_neigh(rt);
616 if (n < 0)
617 return n;
618 }
619 return m;
620}
621
622static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
623 int *mpri, struct rt6_info *match,
624 bool *do_rr)
625{
626 int m;
627 bool match_do_rr = false;
628
629 if (rt6_check_expired(rt))
630 goto out;
631
632 m = rt6_score_route(rt, oif, strict);
633 if (m == RT6_NUD_FAIL_DO_RR) {
634 match_do_rr = true;
635 m = 0; /* lowest valid score */
636 } else if (m == RT6_NUD_FAIL_HARD) {
637 goto out;
638 }
639
640 if (strict & RT6_LOOKUP_F_REACHABLE)
641 rt6_probe(rt);
642
643 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
644 if (m > *mpri) {
645 *do_rr = match_do_rr;
646 *mpri = m;
647 match = rt;
648 }
649out:
650 return match;
651}
652
653static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
654 struct rt6_info *rr_head,
655 u32 metric, int oif, int strict,
656 bool *do_rr)
657{
658 struct rt6_info *rt, *match;
659 int mpri = -1;
660
661 match = NULL;
662 for (rt = rr_head; rt && rt->rt6i_metric == metric;
663 rt = rt->dst.rt6_next)
664 match = find_match(rt, oif, strict, &mpri, match, do_rr);
665 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
666 rt = rt->dst.rt6_next)
667 match = find_match(rt, oif, strict, &mpri, match, do_rr);
668
669 return match;
670}
671
672static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
673{
674 struct rt6_info *match, *rt0;
675 struct net *net;
676 bool do_rr = false;
677
678 rt0 = fn->rr_ptr;
679 if (!rt0)
680 fn->rr_ptr = rt0 = fn->leaf;
681
682 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
683 &do_rr);
684
685 if (do_rr) {
686 struct rt6_info *next = rt0->dst.rt6_next;
687
688 /* no entries matched; do round-robin */
689 if (!next || next->rt6i_metric != rt0->rt6i_metric)
690 next = fn->leaf;
691
692 if (next != rt0)
693 fn->rr_ptr = next;
694 }
695
696 net = dev_net(rt0->dst.dev);
697 return match ? match : net->ipv6.ip6_null_entry;
698}
699
700#ifdef CONFIG_IPV6_ROUTE_INFO
701int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
702 const struct in6_addr *gwaddr)
703{
704 struct net *net = dev_net(dev);
705 struct route_info *rinfo = (struct route_info *) opt;
706 struct in6_addr prefix_buf, *prefix;
707 unsigned int pref;
708 unsigned long lifetime;
709 struct rt6_info *rt;
710
711 if (len < sizeof(struct route_info)) {
712 return -EINVAL;
713 }
714
715 /* Sanity check for prefix_len and length */
716 if (rinfo->length > 3) {
717 return -EINVAL;
718 } else if (rinfo->prefix_len > 128) {
719 return -EINVAL;
720 } else if (rinfo->prefix_len > 64) {
721 if (rinfo->length < 2) {
722 return -EINVAL;
723 }
724 } else if (rinfo->prefix_len > 0) {
725 if (rinfo->length < 1) {
726 return -EINVAL;
727 }
728 }
729
730 pref = rinfo->route_pref;
731 if (pref == ICMPV6_ROUTER_PREF_INVALID)
732 return -EINVAL;
733
734 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
735
736 if (rinfo->length == 3)
737 prefix = (struct in6_addr *)rinfo->prefix;
738 else {
739 /* this function is safe */
740 ipv6_addr_prefix(&prefix_buf,
741 (struct in6_addr *)rinfo->prefix,
742 rinfo->prefix_len);
743 prefix = &prefix_buf;
744 }
745
746 if (rinfo->prefix_len == 0)
747 rt = rt6_get_dflt_router(gwaddr, dev);
748 else
749 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
750 gwaddr, dev->ifindex);
751
752 if (rt && !lifetime) {
753 ip6_del_rt(rt);
754 rt = NULL;
755 }
756
757 if (!rt && lifetime)
758 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
759 pref);
760 else if (rt)
761 rt->rt6i_flags = RTF_ROUTEINFO |
762 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
763
764 if (rt) {
765 if (!addrconf_finite_timeout(lifetime))
766 rt6_clean_expires(rt);
767 else
768 rt6_set_expires(rt, jiffies + HZ * lifetime);
769
770 ip6_rt_put(rt);
771 }
772 return 0;
773}
774#endif
775
776#define BACKTRACK(__net, saddr) \
777do { \
778 if (rt == __net->ipv6.ip6_null_entry) { \
779 struct fib6_node *pn; \
780 while (1) { \
781 if (fn->fn_flags & RTN_TL_ROOT) \
782 goto out; \
783 pn = fn->parent; \
784 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
785 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
786 else \
787 fn = pn; \
788 if (fn->fn_flags & RTN_RTINFO) \
789 goto restart; \
790 } \
791 } \
792} while (0)
793
794static struct rt6_info *ip6_pol_route_lookup(struct net *net,
795 struct fib6_table *table,
796 struct flowi6 *fl6, int flags)
797{
798 struct fib6_node *fn;
799 struct rt6_info *rt;
800
801 read_lock_bh(&table->tb6_lock);
802 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
803restart:
804 rt = fn->leaf;
805 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
806 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
807 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
808 BACKTRACK(net, &fl6->saddr);
809out:
810 dst_use(&rt->dst, jiffies);
811 read_unlock_bh(&table->tb6_lock);
812 return rt;
813
814}
815
816struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
817 int flags)
818{
819 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
820}
821EXPORT_SYMBOL_GPL(ip6_route_lookup);
822
823struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
824 const struct in6_addr *saddr, int oif, int strict)
825{
826 struct flowi6 fl6 = {
827 .flowi6_oif = oif,
828 .daddr = *daddr,
829 };
830 struct dst_entry *dst;
831 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
832
833 if (saddr) {
834 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
835 flags |= RT6_LOOKUP_F_HAS_SADDR;
836 }
837
838 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
839 if (dst->error == 0)
840 return (struct rt6_info *) dst;
841
842 dst_release(dst);
843
844 return NULL;
845}
846
847EXPORT_SYMBOL(rt6_lookup);
848
849/* ip6_ins_rt is called with FREE table->tb6_lock.
850 It takes new route entry, the addition fails by any reason the
851 route is freed. In any case, if caller does not hold it, it may
852 be destroyed.
853 */
854
855static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
856 struct nlattr *mx, int mx_len)
857{
858 int err;
859 struct fib6_table *table;
860
861 table = rt->rt6i_table;
862 write_lock_bh(&table->tb6_lock);
863 err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
864 write_unlock_bh(&table->tb6_lock);
865
866 return err;
867}
868
869int ip6_ins_rt(struct rt6_info *rt)
870{
871 struct nl_info info = {
872 .nl_net = dev_net(rt->dst.dev),
873 };
874 return __ip6_ins_rt(rt, &info, NULL, 0);
875}
876
877static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
878 const struct in6_addr *daddr,
879 const struct in6_addr *saddr)
880{
881 struct rt6_info *rt;
882
883 /*
884 * Clone the route.
885 */
886
887 rt = ip6_rt_copy(ort, daddr);
888
889 if (rt) {
890 if (ort->rt6i_dst.plen != 128 &&
891 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
892 rt->rt6i_flags |= RTF_ANYCAST;
893
894 rt->rt6i_flags |= RTF_CACHE;
895
896#ifdef CONFIG_IPV6_SUBTREES
897 if (rt->rt6i_src.plen && saddr) {
898 rt->rt6i_src.addr = *saddr;
899 rt->rt6i_src.plen = 128;
900 }
901#endif
902 }
903
904 return rt;
905}
906
907static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
908 const struct in6_addr *daddr)
909{
910 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
911
912 if (rt)
913 rt->rt6i_flags |= RTF_CACHE;
914 return rt;
915}
916
917static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
918 struct flowi6 *fl6, int flags)
919{
920 struct fib6_node *fn;
921 struct rt6_info *rt, *nrt;
922 int strict = 0;
923 int attempts = 3;
924 int err;
925 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
926
927 strict |= flags & RT6_LOOKUP_F_IFACE;
928
929relookup:
930 read_lock_bh(&table->tb6_lock);
931
932restart_2:
933 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
934
935restart:
936 rt = rt6_select(fn, oif, strict | reachable);
937 if (rt->rt6i_nsiblings)
938 rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
939 BACKTRACK(net, &fl6->saddr);
940 if (rt == net->ipv6.ip6_null_entry ||
941 rt->rt6i_flags & RTF_CACHE)
942 goto out;
943
944 dst_hold(&rt->dst);
945 read_unlock_bh(&table->tb6_lock);
946
947 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
948 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
949 else if (!(rt->dst.flags & DST_HOST))
950 nrt = rt6_alloc_clone(rt, &fl6->daddr);
951 else
952 goto out2;
953
954 ip6_rt_put(rt);
955 rt = nrt ? : net->ipv6.ip6_null_entry;
956
957 dst_hold(&rt->dst);
958 if (nrt) {
959 err = ip6_ins_rt(nrt);
960 if (!err)
961 goto out2;
962 }
963
964 if (--attempts <= 0)
965 goto out2;
966
967 /*
968 * Race condition! In the gap, when table->tb6_lock was
969 * released someone could insert this route. Relookup.
970 */
971 ip6_rt_put(rt);
972 goto relookup;
973
974out:
975 if (reachable) {
976 reachable = 0;
977 goto restart_2;
978 }
979 dst_hold(&rt->dst);
980 read_unlock_bh(&table->tb6_lock);
981out2:
982 rt->dst.lastuse = jiffies;
983 rt->dst.__use++;
984
985 return rt;
986}
987
988static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
989 struct flowi6 *fl6, int flags)
990{
991 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
992}
993
994static struct dst_entry *ip6_route_input_lookup(struct net *net,
995 struct net_device *dev,
996 struct flowi6 *fl6, int flags)
997{
998 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
999 flags |= RT6_LOOKUP_F_IFACE;
1000
1001 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1002}
1003
1004void ip6_route_input(struct sk_buff *skb)
1005{
1006 const struct ipv6hdr *iph = ipv6_hdr(skb);
1007 struct net *net = dev_net(skb->dev);
1008 int flags = RT6_LOOKUP_F_HAS_SADDR;
1009 struct flowi6 fl6 = {
1010 .flowi6_iif = skb->dev->ifindex,
1011 .daddr = iph->daddr,
1012 .saddr = iph->saddr,
1013 .flowlabel = ip6_flowinfo(iph),
1014 .flowi6_mark = skb->mark,
1015 .flowi6_proto = iph->nexthdr,
1016 };
1017
1018 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1019}
1020
1021static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1022 struct flowi6 *fl6, int flags)
1023{
1024 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1025}
1026
1027struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
1028 struct flowi6 *fl6)
1029{
1030 int flags = 0;
1031
1032 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1033
1034 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1035 flags |= RT6_LOOKUP_F_IFACE;
1036
1037 if (!ipv6_addr_any(&fl6->saddr))
1038 flags |= RT6_LOOKUP_F_HAS_SADDR;
1039 else if (sk)
1040 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1041
1042 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1043}
1044
1045EXPORT_SYMBOL(ip6_route_output);
1046
1047struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1048{
1049 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1050 struct dst_entry *new = NULL;
1051
1052 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1053 if (rt) {
1054 new = &rt->dst;
1055
1056 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1057 rt6_init_peer(rt, net->ipv6.peers);
1058
1059 new->__use = 1;
1060 new->input = dst_discard;
1061 new->output = dst_discard_sk;
1062
1063 if (dst_metrics_read_only(&ort->dst))
1064 new->_metrics = ort->dst._metrics;
1065 else
1066 dst_copy_metrics(new, &ort->dst);
1067 rt->rt6i_idev = ort->rt6i_idev;
1068 if (rt->rt6i_idev)
1069 in6_dev_hold(rt->rt6i_idev);
1070
1071 rt->rt6i_gateway = ort->rt6i_gateway;
1072 rt->rt6i_flags = ort->rt6i_flags;
1073 rt->rt6i_metric = 0;
1074
1075 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1076#ifdef CONFIG_IPV6_SUBTREES
1077 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1078#endif
1079
1080 dst_free(new);
1081 }
1082
1083 dst_release(dst_orig);
1084 return new ? new : ERR_PTR(-ENOMEM);
1085}
1086
1087/*
1088 * Destination cache support functions
1089 */
1090
1091static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1092{
1093 struct rt6_info *rt;
1094
1095 rt = (struct rt6_info *) dst;
1096
1097 /* All IPV6 dsts are created with ->obsolete set to the value
1098 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1099 * into this function always.
1100 */
1101 if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
1102 return NULL;
1103
1104 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1105 return NULL;
1106
1107 if (rt6_check_expired(rt))
1108 return NULL;
1109
1110 return dst;
1111}
1112
1113static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1114{
1115 struct rt6_info *rt = (struct rt6_info *) dst;
1116
1117 if (rt) {
1118 if (rt->rt6i_flags & RTF_CACHE) {
1119 if (rt6_check_expired(rt)) {
1120 ip6_del_rt(rt);
1121 dst = NULL;
1122 }
1123 } else {
1124 dst_release(dst);
1125 dst = NULL;
1126 }
1127 }
1128 return dst;
1129}
1130
1131static void ip6_link_failure(struct sk_buff *skb)
1132{
1133 struct rt6_info *rt;
1134
1135 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1136
1137 rt = (struct rt6_info *) skb_dst(skb);
1138 if (rt) {
1139 if (rt->rt6i_flags & RTF_CACHE) {
1140 dst_hold(&rt->dst);
1141 if (ip6_del_rt(rt))
1142 dst_free(&rt->dst);
1143 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1144 rt->rt6i_node->fn_sernum = -1;
1145 }
1146 }
1147}
1148
1149static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1150 struct sk_buff *skb, u32 mtu)
1151{
1152 struct rt6_info *rt6 = (struct rt6_info*)dst;
1153
1154 dst_confirm(dst);
1155 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1156 struct net *net = dev_net(dst->dev);
1157
1158 rt6->rt6i_flags |= RTF_MODIFIED;
1159 if (mtu < IPV6_MIN_MTU) {
1160 u32 features = dst_metric(dst, RTAX_FEATURES);
1161 mtu = IPV6_MIN_MTU;
1162 features |= RTAX_FEATURE_ALLFRAG;
1163 dst_metric_set(dst, RTAX_FEATURES, features);
1164 }
1165 dst_metric_set(dst, RTAX_MTU, mtu);
1166 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1167 }
1168}
1169
1170void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1171 int oif, u32 mark)
1172{
1173 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1174 struct dst_entry *dst;
1175 struct flowi6 fl6;
1176
1177 memset(&fl6, 0, sizeof(fl6));
1178 fl6.flowi6_oif = oif;
1179 fl6.flowi6_mark = mark;
1180 fl6.daddr = iph->daddr;
1181 fl6.saddr = iph->saddr;
1182 fl6.flowlabel = ip6_flowinfo(iph);
1183
1184 dst = ip6_route_output(net, NULL, &fl6);
1185 if (!dst->error)
1186 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1187 dst_release(dst);
1188}
1189EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1190
1191void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1192{
1193 ip6_update_pmtu(skb, sock_net(sk), mtu,
1194 sk->sk_bound_dev_if, sk->sk_mark);
1195}
1196EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1197
1198/* Handle redirects */
1199struct ip6rd_flowi {
1200 struct flowi6 fl6;
1201 struct in6_addr gateway;
1202};
1203
1204static struct rt6_info *__ip6_route_redirect(struct net *net,
1205 struct fib6_table *table,
1206 struct flowi6 *fl6,
1207 int flags)
1208{
1209 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1210 struct rt6_info *rt;
1211 struct fib6_node *fn;
1212
1213 /* Get the "current" route for this destination and
1214 * check if the redirect has come from approriate router.
1215 *
1216 * RFC 4861 specifies that redirects should only be
1217 * accepted if they come from the nexthop to the target.
1218 * Due to the way the routes are chosen, this notion
1219 * is a bit fuzzy and one might need to check all possible
1220 * routes.
1221 */
1222
1223 read_lock_bh(&table->tb6_lock);
1224 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1225restart:
1226 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1227 if (rt6_check_expired(rt))
1228 continue;
1229 if (rt->dst.error)
1230 break;
1231 if (!(rt->rt6i_flags & RTF_GATEWAY))
1232 continue;
1233 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1234 continue;
1235 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1236 continue;
1237 break;
1238 }
1239
1240 if (!rt)
1241 rt = net->ipv6.ip6_null_entry;
1242 else if (rt->dst.error) {
1243 rt = net->ipv6.ip6_null_entry;
1244 goto out;
1245 }
1246 BACKTRACK(net, &fl6->saddr);
1247out:
1248 dst_hold(&rt->dst);
1249
1250 read_unlock_bh(&table->tb6_lock);
1251
1252 return rt;
1253};
1254
1255static struct dst_entry *ip6_route_redirect(struct net *net,
1256 const struct flowi6 *fl6,
1257 const struct in6_addr *gateway)
1258{
1259 int flags = RT6_LOOKUP_F_HAS_SADDR;
1260 struct ip6rd_flowi rdfl;
1261
1262 rdfl.fl6 = *fl6;
1263 rdfl.gateway = *gateway;
1264
1265 return fib6_rule_lookup(net, &rdfl.fl6,
1266 flags, __ip6_route_redirect);
1267}
1268
1269void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1270{
1271 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1272 struct dst_entry *dst;
1273 struct flowi6 fl6;
1274
1275 memset(&fl6, 0, sizeof(fl6));
1276 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1277 fl6.flowi6_oif = oif;
1278 fl6.flowi6_mark = mark;
1279 fl6.daddr = iph->daddr;
1280 fl6.saddr = iph->saddr;
1281 fl6.flowlabel = ip6_flowinfo(iph);
1282
1283 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1284 rt6_do_redirect(dst, NULL, skb);
1285 dst_release(dst);
1286}
1287EXPORT_SYMBOL_GPL(ip6_redirect);
1288
1289void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1290 u32 mark)
1291{
1292 const struct ipv6hdr *iph = ipv6_hdr(skb);
1293 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1294 struct dst_entry *dst;
1295 struct flowi6 fl6;
1296
1297 memset(&fl6, 0, sizeof(fl6));
1298 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1299 fl6.flowi6_oif = oif;
1300 fl6.flowi6_mark = mark;
1301 fl6.daddr = msg->dest;
1302 fl6.saddr = iph->daddr;
1303
1304 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1305 rt6_do_redirect(dst, NULL, skb);
1306 dst_release(dst);
1307}
1308
1309void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1310{
1311 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1312}
1313EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1314
1315static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1316{
1317 struct net_device *dev = dst->dev;
1318 unsigned int mtu = dst_mtu(dst);
1319 struct net *net = dev_net(dev);
1320
1321 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1322
1323 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1324 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1325
1326 /*
1327 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1328 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1329 * IPV6_MAXPLEN is also valid and means: "any MSS,
1330 * rely only on pmtu discovery"
1331 */
1332 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1333 mtu = IPV6_MAXPLEN;
1334 return mtu;
1335}
1336
1337static unsigned int ip6_mtu(const struct dst_entry *dst)
1338{
1339 struct inet6_dev *idev;
1340 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1341
1342 if (mtu)
1343 goto out;
1344
1345 mtu = IPV6_MIN_MTU;
1346
1347 rcu_read_lock();
1348 idev = __in6_dev_get(dst->dev);
1349 if (idev)
1350 mtu = idev->cnf.mtu6;
1351 rcu_read_unlock();
1352
1353out:
1354 return min_t(unsigned int, mtu, IP6_MAX_MTU);
1355}
1356
1357static struct dst_entry *icmp6_dst_gc_list;
1358static DEFINE_SPINLOCK(icmp6_dst_lock);
1359
1360struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1361 struct flowi6 *fl6)
1362{
1363 struct dst_entry *dst;
1364 struct rt6_info *rt;
1365 struct inet6_dev *idev = in6_dev_get(dev);
1366 struct net *net = dev_net(dev);
1367
1368 if (unlikely(!idev))
1369 return ERR_PTR(-ENODEV);
1370
1371 rt = ip6_dst_alloc(net, dev, 0, NULL);
1372 if (unlikely(!rt)) {
1373 in6_dev_put(idev);
1374 dst = ERR_PTR(-ENOMEM);
1375 goto out;
1376 }
1377
1378 rt->dst.flags |= DST_HOST;
1379 rt->dst.output = ip6_output;
1380 atomic_set(&rt->dst.__refcnt, 1);
1381 rt->rt6i_gateway = fl6->daddr;
1382 rt->rt6i_dst.addr = fl6->daddr;
1383 rt->rt6i_dst.plen = 128;
1384 rt->rt6i_idev = idev;
1385 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1386
1387 spin_lock_bh(&icmp6_dst_lock);
1388 rt->dst.next = icmp6_dst_gc_list;
1389 icmp6_dst_gc_list = &rt->dst;
1390 spin_unlock_bh(&icmp6_dst_lock);
1391
1392 fib6_force_start_gc(net);
1393
1394 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1395
1396out:
1397 return dst;
1398}
1399
1400int icmp6_dst_gc(void)
1401{
1402 struct dst_entry *dst, **pprev;
1403 int more = 0;
1404
1405 spin_lock_bh(&icmp6_dst_lock);
1406 pprev = &icmp6_dst_gc_list;
1407
1408 while ((dst = *pprev) != NULL) {
1409 if (!atomic_read(&dst->__refcnt)) {
1410 *pprev = dst->next;
1411 dst_free(dst);
1412 } else {
1413 pprev = &dst->next;
1414 ++more;
1415 }
1416 }
1417
1418 spin_unlock_bh(&icmp6_dst_lock);
1419
1420 return more;
1421}
1422
1423static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1424 void *arg)
1425{
1426 struct dst_entry *dst, **pprev;
1427
1428 spin_lock_bh(&icmp6_dst_lock);
1429 pprev = &icmp6_dst_gc_list;
1430 while ((dst = *pprev) != NULL) {
1431 struct rt6_info *rt = (struct rt6_info *) dst;
1432 if (func(rt, arg)) {
1433 *pprev = dst->next;
1434 dst_free(dst);
1435 } else {
1436 pprev = &dst->next;
1437 }
1438 }
1439 spin_unlock_bh(&icmp6_dst_lock);
1440}
1441
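/*
 * Cache garbage collector.  ip6_rt_gc_expire is bumped on every forced
 * run (making fib6_run_gc() more aggressive) and then reduced by a
 * 2^-gc_elasticity fraction of itself on each call.  Returns nonzero
 * while the cache is still over the max_size sysctl.
 */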
1442static int ip6_dst_gc(struct dst_ops *ops)
1443{
1444 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1445 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1446 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1447 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1448 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1449 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1450 int entries;
1451
1452 entries = dst_entries_get_fast(ops);
1453 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1454 entries <= rt_max_size)
1455 goto out;
1456
1457 net->ipv6.ip6_rt_gc_expire++;
1458 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
1459 entries = dst_entries_get_slow(ops);
1460 if (entries < ops->gc_thresh)
1461 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1462out:
1463 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1464 return entries > rt_max_size;
1465}
1466
/*
 *	Route addition and removal
 */
1470
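/*
 * Build a rt6_info from a fib6_config and insert it into the table
 * selected by cfg->fc_table.  On success ownership passes to the FIB;
 * on error every device, idev and dst reference taken here is dropped
 * again.
 */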
1471int ip6_route_add(struct fib6_config *cfg)
1472{
1473 int err;
1474 struct net *net = cfg->fc_nlinfo.nl_net;
1475 struct rt6_info *rt = NULL;
1476 struct net_device *dev = NULL;
1477 struct inet6_dev *idev = NULL;
1478 struct fib6_table *table;
1479 int addr_type;
1480
1481 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1482 return -EINVAL;
1483#ifndef CONFIG_IPV6_SUBTREES
1484 if (cfg->fc_src_len)
1485 return -EINVAL;
1486#endif
1487 if (cfg->fc_ifindex) {
1488 err = -ENODEV;
1489 dev = dev_get_by_index(net, cfg->fc_ifindex);
1490 if (!dev)
1491 goto out;
1492 idev = in6_dev_get(dev);
1493 if (!idev)
1494 goto out;
1495 }
1496
1497 if (cfg->fc_metric == 0)
1498 cfg->fc_metric = IP6_RT_PRIO_USER;
1499
1500 err = -ENOBUFS;
1501 if (cfg->fc_nlinfo.nlh &&
1502 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1503 table = fib6_get_table(net, cfg->fc_table);
1504 if (!table) {
1505 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1506 table = fib6_new_table(net, cfg->fc_table);
1507 }
1508 } else {
1509 table = fib6_new_table(net, cfg->fc_table);
1510 }
1511
1512 if (!table)
1513 goto out;
1514
	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT,
			   table);
1516
1517 if (!rt) {
1518 err = -ENOMEM;
1519 goto out;
1520 }
1521
1522 if (cfg->fc_flags & RTF_EXPIRES)
1523 rt6_set_expires(rt, jiffies +
1524 clock_t_to_jiffies(cfg->fc_expires));
1525 else
1526 rt6_clean_expires(rt);
1527
1528 if (cfg->fc_protocol == RTPROT_UNSPEC)
1529 cfg->fc_protocol = RTPROT_BOOT;
1530 rt->rt6i_protocol = cfg->fc_protocol;
1531
1532 addr_type = ipv6_addr_type(&cfg->fc_dst);
1533
1534 if (addr_type & IPV6_ADDR_MULTICAST)
1535 rt->dst.input = ip6_mc_input;
1536 else if (cfg->fc_flags & RTF_LOCAL)
1537 rt->dst.input = ip6_input;
1538 else
1539 rt->dst.input = ip6_forward;
1540
1541 rt->dst.output = ip6_output;
1542
1543 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1544 rt->rt6i_dst.plen = cfg->fc_dst_len;
1545 if (rt->rt6i_dst.plen == 128) {
1546 rt->dst.flags |= DST_HOST;
1547 dst_metrics_set_force_overwrite(&rt->dst);
1548 }
1549
1550#ifdef CONFIG_IPV6_SUBTREES
1551 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1552 rt->rt6i_src.plen = cfg->fc_src_len;
1553#endif
1554
1555 rt->rt6i_metric = cfg->fc_metric;
1556
	/* We cannot add true routes via loopback here; they would
	   result in kernel looping.  Promote them to reject routes
	   instead.
	 */
1560 if ((cfg->fc_flags & RTF_REJECT) ||
1561 (dev && (dev->flags & IFF_LOOPBACK) &&
1562 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1563 !(cfg->fc_flags & RTF_LOCAL))) {
1564 /* hold loopback dev/idev if we haven't done so. */
1565 if (dev != net->loopback_dev) {
1566 if (dev) {
1567 dev_put(dev);
1568 in6_dev_put(idev);
1569 }
1570 dev = net->loopback_dev;
1571 dev_hold(dev);
1572 idev = in6_dev_get(dev);
1573 if (!idev) {
1574 err = -ENODEV;
1575 goto out;
1576 }
1577 }
1578 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1579 switch (cfg->fc_type) {
1580 case RTN_BLACKHOLE:
1581 rt->dst.error = -EINVAL;
1582 rt->dst.output = dst_discard_sk;
1583 rt->dst.input = dst_discard;
1584 break;
1585 case RTN_PROHIBIT:
1586 rt->dst.error = -EACCES;
1587 rt->dst.output = ip6_pkt_prohibit_out;
1588 rt->dst.input = ip6_pkt_prohibit;
1589 break;
1590 case RTN_THROW:
1591 default:
1592 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1593 : -ENETUNREACH;
1594 rt->dst.output = ip6_pkt_discard_out;
1595 rt->dst.input = ip6_pkt_discard;
1596 break;
1597 }
1598 goto install_route;
1599 }
1600
1601 if (cfg->fc_flags & RTF_GATEWAY) {
1602 const struct in6_addr *gw_addr;
1603 int gwa_type;
1604
1605 gw_addr = &cfg->fc_gateway;
1606 rt->rt6i_gateway = *gw_addr;
1607 gwa_type = ipv6_addr_type(gw_addr);
1608
1609 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1610 struct rt6_info *grt;
1611
			/* IPv6 strictly forbids using non-link-local
			   addresses as a nexthop address.
			   Otherwise, the router will not be able to send
			   redirects.  That is usually desirable, but in
			   some (rare!) circumstances (SIT, PtP, NBMA NOARP
			   links) it is handy to allow exceptions. --ANK
			 */
1619 err = -EINVAL;
1620 if (!(gwa_type & IPV6_ADDR_UNICAST))
1621 goto out;
1622
1623 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1624
1625 err = -EHOSTUNREACH;
1626 if (!grt)
1627 goto out;
1628 if (dev) {
1629 if (dev != grt->dst.dev) {
1630 ip6_rt_put(grt);
1631 goto out;
1632 }
1633 } else {
1634 dev = grt->dst.dev;
1635 idev = grt->rt6i_idev;
1636 dev_hold(dev);
1637 in6_dev_hold(grt->rt6i_idev);
1638 }
1639 if (!(grt->rt6i_flags & RTF_GATEWAY))
1640 err = 0;
1641 ip6_rt_put(grt);
1642
1643 if (err)
1644 goto out;
1645 }
1646 err = -EINVAL;
1647 if (!dev || (dev->flags & IFF_LOOPBACK))
1648 goto out;
1649 }
1650
1651 err = -ENODEV;
1652 if (!dev)
1653 goto out;
1654
1655 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1656 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1657 err = -EINVAL;
1658 goto out;
1659 }
1660 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1661 rt->rt6i_prefsrc.plen = 128;
1662 } else
1663 rt->rt6i_prefsrc.plen = 0;
1664
1665 rt->rt6i_flags = cfg->fc_flags;
1666
1667install_route:
1668 rt->dst.dev = dev;
1669 rt->rt6i_idev = idev;
1670 rt->rt6i_table = table;
1671
1672 cfg->fc_nlinfo.nl_net = dev_net(dev);
1673
1674 return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
1675
1676out:
1677 if (dev)
1678 dev_put(dev);
1679 if (idev)
1680 in6_dev_put(idev);
1681 if (rt)
1682 dst_free(&rt->dst);
1683 return err;
1684}
1685
1686static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1687{
1688 int err;
1689 struct fib6_table *table;
1690 struct net *net = dev_net(rt->dst.dev);
1691
1692 if (rt == net->ipv6.ip6_null_entry) {
1693 err = -ENOENT;
1694 goto out;
1695 }
1696
1697 table = rt->rt6i_table;
1698 write_lock_bh(&table->tb6_lock);
1699 err = fib6_del(rt, info);
1700 write_unlock_bh(&table->tb6_lock);
1701
1702out:
1703 ip6_rt_put(rt);
1704 return err;
1705}
1706
1707int ip6_del_rt(struct rt6_info *rt)
1708{
1709 struct nl_info info = {
1710 .nl_net = dev_net(rt->dst.dev),
1711 };
1712 return __ip6_del_rt(rt, &info);
1713}
1714
1715static int ip6_route_del(struct fib6_config *cfg)
1716{
1717 struct fib6_table *table;
1718 struct fib6_node *fn;
1719 struct rt6_info *rt;
1720 int err = -ESRCH;
1721
1722 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1723 if (!table)
1724 return err;
1725
1726 read_lock_bh(&table->tb6_lock);
1727
1728 fn = fib6_locate(&table->tb6_root,
1729 &cfg->fc_dst, cfg->fc_dst_len,
1730 &cfg->fc_src, cfg->fc_src_len);
1731
1732 if (fn) {
1733 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1734 if (cfg->fc_ifindex &&
1735 (!rt->dst.dev ||
1736 rt->dst.dev->ifindex != cfg->fc_ifindex))
1737 continue;
1738 if (cfg->fc_flags & RTF_GATEWAY &&
1739 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1740 continue;
1741 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1742 continue;
1743 dst_hold(&rt->dst);
1744 read_unlock_bh(&table->tb6_lock);
1745
1746 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1747 }
1748 }
1749 read_unlock_bh(&table->tb6_lock);
1750
1751 return err;
1752}
1753
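/*
 * Handle an ICMPv6 redirect (RFC 2461, section 8): validate the
 * message and its ND options, require a link-local unicast target
 * unless target == destination (the on-link case), update the
 * neighbour cache, then install a host route clone pointing at the
 * new first hop.
 */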
1754static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1755{
1756 struct net *net = dev_net(skb->dev);
1757 struct netevent_redirect netevent;
1758 struct rt6_info *rt, *nrt = NULL;
1759 struct ndisc_options ndopts;
1760 struct inet6_dev *in6_dev;
1761 struct neighbour *neigh;
1762 struct rd_msg *msg;
1763 int optlen, on_link;
1764 u8 *lladdr;
1765
1766 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1767 optlen -= sizeof(*msg);
1768
1769 if (optlen < 0) {
1770 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1771 return;
1772 }
1773
1774 msg = (struct rd_msg *)icmp6_hdr(skb);
1775
1776 if (ipv6_addr_is_multicast(&msg->dest)) {
1777 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1778 return;
1779 }
1780
1781 on_link = 0;
1782 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1783 on_link = 1;
1784 } else if (ipv6_addr_type(&msg->target) !=
1785 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1786 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1787 return;
1788 }
1789
1790 in6_dev = __in6_dev_get(skb->dev);
1791 if (!in6_dev)
1792 return;
1793 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1794 return;
1795
1796 /* RFC2461 8.1:
1797 * The IP source address of the Redirect MUST be the same as the current
1798 * first-hop router for the specified ICMP Destination Address.
1799 */
1800
1801 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1802 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1803 return;
1804 }
1805
1806 lladdr = NULL;
1807 if (ndopts.nd_opts_tgt_lladdr) {
1808 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1809 skb->dev);
1810 if (!lladdr) {
1811 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1812 return;
1813 }
1814 }
1815
1816 rt = (struct rt6_info *) dst;
1817 if (rt == net->ipv6.ip6_null_entry) {
1818 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1819 return;
1820 }
1821
	/* Redirect received -> path was valid.
	 * Redirects are sent only in response to data packets,
	 * so this nexthop is apparently reachable. --ANK
	 */
1826 dst_confirm(&rt->dst);
1827
1828 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1829 if (!neigh)
1830 return;
1831
1832 /*
1833 * We have finally decided to accept it.
1834 */
1835
1836 neigh_update(neigh, lladdr, NUD_STALE,
1837 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1838 NEIGH_UPDATE_F_OVERRIDE|
1839 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1840 NEIGH_UPDATE_F_ISROUTER))
1841 );
1842
1843 nrt = ip6_rt_copy(rt, &msg->dest);
1844 if (!nrt)
1845 goto out;
1846
1847 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1848 if (on_link)
1849 nrt->rt6i_flags &= ~RTF_GATEWAY;
1850
1851 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1852
1853 if (ip6_ins_rt(nrt))
1854 goto out;
1855
1856 netevent.old = &rt->dst;
1857 netevent.new = &nrt->dst;
1858 netevent.daddr = &msg->dest;
1859 netevent.neigh = neigh;
1860 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1861
1862 if (rt->rt6i_flags & RTF_CACHE) {
1863 rt = (struct rt6_info *) dst_clone(&rt->dst);
1864 ip6_del_rt(rt);
1865 }
1866
1867out:
1868 neigh_release(neigh);
1869}
1870
1871/*
1872 * Misc support functions
1873 */
1874
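/*
 * Clone @ort into a new host route (/128) for @dest, copying metrics,
 * flags and device references.  Used by redirect processing when a
 * per-destination copy of a routing entry is needed.
 */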
1875static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1876 const struct in6_addr *dest)
1877{
1878 struct net *net = dev_net(ort->dst.dev);
1879 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1880 ort->rt6i_table);
1881
1882 if (rt) {
1883 rt->dst.input = ort->dst.input;
1884 rt->dst.output = ort->dst.output;
1885 rt->dst.flags |= DST_HOST;
1886
1887 rt->rt6i_dst.addr = *dest;
1888 rt->rt6i_dst.plen = 128;
1889 dst_copy_metrics(&rt->dst, &ort->dst);
1890 rt->dst.error = ort->dst.error;
1891 rt->rt6i_idev = ort->rt6i_idev;
1892 if (rt->rt6i_idev)
1893 in6_dev_hold(rt->rt6i_idev);
1894 rt->dst.lastuse = jiffies;
1895
1896 if (ort->rt6i_flags & RTF_GATEWAY)
1897 rt->rt6i_gateway = ort->rt6i_gateway;
1898 else
1899 rt->rt6i_gateway = *dest;
1900 rt->rt6i_flags = ort->rt6i_flags;
1901 rt6_set_from(rt, ort);
1902 rt->rt6i_metric = 0;
1903
1904#ifdef CONFIG_IPV6_SUBTREES
1905 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1906#endif
1907 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1908 rt->rt6i_table = ort->rt6i_table;
1909 }
1910 return rt;
1911}
1912
1913#ifdef CONFIG_IPV6_ROUTE_INFO
1914static struct rt6_info *rt6_get_route_info(struct net *net,
1915 const struct in6_addr *prefix, int prefixlen,
1916 const struct in6_addr *gwaddr, int ifindex)
1917{
1918 struct fib6_node *fn;
1919 struct rt6_info *rt = NULL;
1920 struct fib6_table *table;
1921
1922 table = fib6_get_table(net, RT6_TABLE_INFO);
1923 if (!table)
1924 return NULL;
1925
1926 read_lock_bh(&table->tb6_lock);
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1928 if (!fn)
1929 goto out;
1930
1931 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1932 if (rt->dst.dev->ifindex != ifindex)
1933 continue;
1934 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1935 continue;
1936 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1937 continue;
1938 dst_hold(&rt->dst);
1939 break;
1940 }
1941out:
1942 read_unlock_bh(&table->tb6_lock);
1943 return rt;
1944}
1945
1946static struct rt6_info *rt6_add_route_info(struct net *net,
1947 const struct in6_addr *prefix, int prefixlen,
1948 const struct in6_addr *gwaddr, int ifindex,
1949 unsigned int pref)
1950{
1951 struct fib6_config cfg = {
1952 .fc_table = RT6_TABLE_INFO,
1953 .fc_metric = IP6_RT_PRIO_USER,
1954 .fc_ifindex = ifindex,
1955 .fc_dst_len = prefixlen,
1956 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1957 RTF_UP | RTF_PREF(pref),
1958 .fc_nlinfo.portid = 0,
1959 .fc_nlinfo.nlh = NULL,
1960 .fc_nlinfo.nl_net = net,
1961 };
1962
1963 cfg.fc_dst = *prefix;
1964 cfg.fc_gateway = *gwaddr;
1965
1966 /* We should treat it as a default route if prefix length is 0. */
1967 if (!prefixlen)
1968 cfg.fc_flags |= RTF_DEFAULT;
1969
1970 ip6_route_add(&cfg);
1971
1972 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1973}
1974#endif
1975
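/*
 * Default routers learned from Router Advertisements live in
 * RT6_TABLE_DFLT and are marked RTF_ADDRCONF | RTF_DEFAULT.  The
 * helpers below look them up, add them and purge them.
 */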
1976struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1977{
1978 struct rt6_info *rt;
1979 struct fib6_table *table;
1980
1981 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1982 if (!table)
1983 return NULL;
1984
1985 read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1987 if (dev == rt->dst.dev &&
1988 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1989 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1990 break;
1991 }
1992 if (rt)
1993 dst_hold(&rt->dst);
1994 read_unlock_bh(&table->tb6_lock);
1995 return rt;
1996}
1997
1998struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1999 struct net_device *dev,
2000 unsigned int pref)
2001{
2002 struct fib6_config cfg = {
2003 .fc_table = RT6_TABLE_DFLT,
2004 .fc_metric = IP6_RT_PRIO_USER,
2005 .fc_ifindex = dev->ifindex,
2006 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2007 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2008 .fc_nlinfo.portid = 0,
2009 .fc_nlinfo.nlh = NULL,
2010 .fc_nlinfo.nl_net = dev_net(dev),
2011 };
2012
2013 cfg.fc_gateway = *gwaddr;
2014
2015 ip6_route_add(&cfg);
2016
2017 return rt6_get_dflt_router(gwaddr, dev);
2018}
2019
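/*
 * Drop all RA-learned default routes, except on interfaces where
 * accept_ra == 2 (i.e. RAs are honoured even with forwarding enabled).
 */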
2020void rt6_purge_dflt_routers(struct net *net)
2021{
2022 struct rt6_info *rt;
2023 struct fib6_table *table;
2024
2025 /* NOTE: Keep consistent with rt6_get_dflt_router */
2026 table = fib6_get_table(net, RT6_TABLE_DFLT);
2027 if (!table)
2028 return;
2029
2030restart:
2031 read_lock_bh(&table->tb6_lock);
2032 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2033 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2034 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2035 dst_hold(&rt->dst);
2036 read_unlock_bh(&table->tb6_lock);
2037 ip6_del_rt(rt);
2038 goto restart;
2039 }
2040 }
2041 read_unlock_bh(&table->tb6_lock);
2042}
2043
2044static void rtmsg_to_fib6_config(struct net *net,
2045 struct in6_rtmsg *rtmsg,
2046 struct fib6_config *cfg)
2047{
2048 memset(cfg, 0, sizeof(*cfg));
2049
2050 cfg->fc_table = RT6_TABLE_MAIN;
2051 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2052 cfg->fc_metric = rtmsg->rtmsg_metric;
2053 cfg->fc_expires = rtmsg->rtmsg_info;
2054 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2055 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2056 cfg->fc_flags = rtmsg->rtmsg_flags;
2057
2058 cfg->fc_nlinfo.nl_net = net;
2059
2060 cfg->fc_dst = rtmsg->rtmsg_dst;
2061 cfg->fc_src = rtmsg->rtmsg_src;
2062 cfg->fc_gateway = rtmsg->rtmsg_gateway;
2063}
2064
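/*
 * Legacy SIOCADDRT/SIOCDELRT ioctl interface.  Requires CAP_NET_ADMIN
 * in the netns owner; the in6_rtmsg is converted to a fib6_config and
 * applied under the RTNL lock.  Userspace usage (illustrative sketch):
 *
 *	struct in6_rtmsg rtm = { ... };
 *	ioctl(fd, SIOCADDRT, &rtm);	(fd is an AF_INET6 socket)
 */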
2065int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2066{
2067 struct fib6_config cfg;
2068 struct in6_rtmsg rtmsg;
2069 int err;
2070
	switch (cmd) {
2072 case SIOCADDRT: /* Add a route */
2073 case SIOCDELRT: /* Delete a route */
2074 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2075 return -EPERM;
2076 err = copy_from_user(&rtmsg, arg,
2077 sizeof(struct in6_rtmsg));
2078 if (err)
2079 return -EFAULT;
2080
2081 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2082
2083 rtnl_lock();
2084 switch (cmd) {
2085 case SIOCADDRT:
2086 err = ip6_route_add(&cfg);
2087 break;
2088 case SIOCDELRT:
2089 err = ip6_route_del(&cfg);
2090 break;
2091 default:
2092 err = -EINVAL;
2093 }
2094 rtnl_unlock();
2095
2096 return err;
2097 }
2098
2099 return -EINVAL;
2100}
2101
2102/*
2103 * Drop the packet on the floor
2104 */
2105
2106static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2107{
2108 int type;
	struct dst_entry *dst = skb_dst(skb);

2110 switch (ipstats_mib_noroutes) {
2111 case IPSTATS_MIB_INNOROUTES:
2112 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2113 if (type == IPV6_ADDR_ANY) {
2114 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2115 IPSTATS_MIB_INADDRERRORS);
2116 break;
2117 }
2118 /* FALLTHROUGH */
2119 case IPSTATS_MIB_OUTNOROUTES:
2120 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2121 ipstats_mib_noroutes);
2122 break;
2123 }
2124 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2125 kfree_skb(skb);
2126 return 0;
2127}
2128
2129static int ip6_pkt_discard(struct sk_buff *skb)
2130{
2131 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2132}
2133
2134static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2135{
2136 skb->dev = skb_dst(skb)->dev;
2137 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2138}
2139
2140static int ip6_pkt_prohibit(struct sk_buff *skb)
2141{
2142 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2143}
2144
2145static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2146{
2147 skb->dev = skb_dst(skb)->dev;
2148 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2149}
2150
2151/*
2152 * Allocate a dst for local (unicast / anycast) address.
2153 */
2154
2155struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2156 const struct in6_addr *addr,
2157 bool anycast)
2158{
2159 struct net *net = dev_net(idev->dev);
2160 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2161 DST_NOCOUNT, NULL);
2162 if (!rt)
2163 return ERR_PTR(-ENOMEM);
2164
2165 in6_dev_hold(idev);
2166
2167 rt->dst.flags |= DST_HOST;
2168 rt->dst.input = ip6_input;
2169 rt->dst.output = ip6_output;
2170 rt->rt6i_idev = idev;
2171
2172 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2173 if (anycast)
2174 rt->rt6i_flags |= RTF_ANYCAST;
2175 else
2176 rt->rt6i_flags |= RTF_LOCAL;
2177
2178 rt->rt6i_gateway = *addr;
2179 rt->rt6i_dst.addr = *addr;
2180 rt->rt6i_dst.plen = 128;
2181 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2182
2183 atomic_set(&rt->dst.__refcnt, 1);
2184
2185 return rt;
2186}
2187
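/*
 * Source address selection for @daddr: prefer the route's configured
 * prefsrc when one is set, otherwise fall back to the standard
 * per-device source address selection.
 */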
2188int ip6_route_get_saddr(struct net *net,
2189 struct rt6_info *rt,
2190 const struct in6_addr *daddr,
2191 unsigned int prefs,
2192 struct in6_addr *saddr)
2193{
	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
	int err = 0;

2196 if (rt->rt6i_prefsrc.plen)
2197 *saddr = rt->rt6i_prefsrc.addr;
2198 else
2199 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2200 daddr, prefs, saddr);
2201 return err;
2202}
2203
/* Remove a deleted address from prefsrc entries. */
2205struct arg_dev_net_ip {
2206 struct net_device *dev;
2207 struct net *net;
2208 struct in6_addr *addr;
2209};
2210
2211static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2212{
2213 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2214 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2215 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2216
2217 if (((void *)rt->dst.dev == dev || !dev) &&
2218 rt != net->ipv6.ip6_null_entry &&
2219 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2220 /* remove prefsrc entry */
2221 rt->rt6i_prefsrc.plen = 0;
2222 }
2223 return 0;
2224}
2225
2226void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2227{
2228 struct net *net = dev_net(ifp->idev->dev);
2229 struct arg_dev_net_ip adni = {
2230 .dev = ifp->idev->dev,
2231 .net = net,
2232 .addr = &ifp->addr,
2233 };
2234 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2235}
2236
2237#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2238#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2239
/* Remove routers and update dst entries when a gateway turns into a host. */
2241static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2242{
2243 struct in6_addr *gateway = (struct in6_addr *)arg;
2244
2245 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2246 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2247 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2248 return -1;
2249 }
2250 return 0;
2251}
2252
2253void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2254{
2255 fib6_clean_all(net, fib6_clean_tohost, gateway);
2256}
2257
2258struct arg_dev_net {
2259 struct net_device *dev;
2260 struct net *net;
2261};
2262
2263static int fib6_ifdown(struct rt6_info *rt, void *arg)
2264{
2265 const struct arg_dev_net *adn = arg;
2266 const struct net_device *dev = adn->dev;
2267
2268 if ((rt->dst.dev == dev || !dev) &&
2269 rt != adn->net->ipv6.ip6_null_entry)
2270 return -1;
2271
2272 return 0;
2273}
2274
2275void rt6_ifdown(struct net *net, struct net_device *dev)
2276{
2277 struct arg_dev_net adn = {
2278 .dev = dev,
2279 .net = net,
2280 };
2281
2282 fib6_clean_all(net, fib6_ifdown, &adn);
2283 icmp6_clean_all(fib6_ifdown, &adn);
2284}
2285
2286struct rt6_mtu_change_arg {
2287 struct net_device *dev;
2288 unsigned int mtu;
2289};
2290
2291static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2292{
2293 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2294 struct inet6_dev *idev;
2295
	/* In IPv6, PMTU discovery is not optional,
	   so an RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/
2301
2302 idev = __in6_dev_get(arg->dev);
2303 if (!idev)
2304 return 0;
2305
	/* After an administrative MTU increase there is no way to
	   discover an IPv6 PMTU increase, so the PMTU must be
	   updated here.  RFC 1981 does not cover administrative MTU
	   increases (e.g. jumbo frames), so doing it here is a MUST.
	 */
	/*
	   If the new MTU is less than the route PMTU, the new MTU
	   will be the lowest MTU in the path; update the route PMTU
	   to reflect the decrease.  If the new MTU is greater than
	   the route PMTU, and the old MTU was the lowest MTU in the
	   path, update the route PMTU to reflect the increase.  In
	   that case, if another node along the path still has a
	   lower MTU, a Packet Too Big message will trigger PMTU
	   discovery again.
	 */
2320 if (rt->dst.dev == arg->dev &&
2321 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2322 (dst_mtu(&rt->dst) >= arg->mtu ||
2323 (dst_mtu(&rt->dst) < arg->mtu &&
2324 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2325 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2326 }
2327 return 0;
2328}
2329
2330void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2331{
2332 struct rt6_mtu_change_arg arg = {
2333 .dev = dev,
2334 .mtu = mtu,
2335 };
2336
2337 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2338}
2339
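/*
 * Note: attributes missing from this policy (RTA_DST, RTA_SRC,
 * RTA_PREFSRC, RTA_MARK) are still accepted; their lengths are checked
 * by hand at the point of use in rtm_to_fib6_config() and
 * inet6_rtm_getroute().
 */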
2340static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2341 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2342 [RTA_OIF] = { .type = NLA_U32 },
2343 [RTA_IIF] = { .type = NLA_U32 },
2344 [RTA_PRIORITY] = { .type = NLA_U32 },
2345 [RTA_METRICS] = { .type = NLA_NESTED },
2346 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2347};
2348
2349static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2350 struct fib6_config *cfg)
2351{
2352 struct rtmsg *rtm;
2353 struct nlattr *tb[RTA_MAX+1];
2354 int err;
2355
2356 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2357 if (err < 0)
2358 goto errout;
2359
2360 err = -EINVAL;
2361 rtm = nlmsg_data(nlh);
2362 memset(cfg, 0, sizeof(*cfg));
2363
2364 cfg->fc_table = rtm->rtm_table;
2365 cfg->fc_dst_len = rtm->rtm_dst_len;
2366 cfg->fc_src_len = rtm->rtm_src_len;
2367 cfg->fc_flags = RTF_UP;
2368 cfg->fc_protocol = rtm->rtm_protocol;
2369 cfg->fc_type = rtm->rtm_type;
2370
2371 if (rtm->rtm_type == RTN_UNREACHABLE ||
2372 rtm->rtm_type == RTN_BLACKHOLE ||
2373 rtm->rtm_type == RTN_PROHIBIT ||
2374 rtm->rtm_type == RTN_THROW)
2375 cfg->fc_flags |= RTF_REJECT;
2376
2377 if (rtm->rtm_type == RTN_LOCAL)
2378 cfg->fc_flags |= RTF_LOCAL;
2379
2380 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2381 cfg->fc_nlinfo.nlh = nlh;
2382 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2383
2384 if (tb[RTA_GATEWAY]) {
2385 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2386 cfg->fc_flags |= RTF_GATEWAY;
2387 }
2388
2389 if (tb[RTA_DST]) {
2390 int plen = (rtm->rtm_dst_len + 7) >> 3;
2391
2392 if (nla_len(tb[RTA_DST]) < plen)
2393 goto errout;
2394
2395 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2396 }
2397
2398 if (tb[RTA_SRC]) {
2399 int plen = (rtm->rtm_src_len + 7) >> 3;
2400
2401 if (nla_len(tb[RTA_SRC]) < plen)
2402 goto errout;
2403
2404 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2405 }
2406
2407 if (tb[RTA_PREFSRC])
2408 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2409
2410 if (tb[RTA_OIF])
2411 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2412
2413 if (tb[RTA_PRIORITY])
2414 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2415
2416 if (tb[RTA_METRICS]) {
2417 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2418 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2419 }
2420
2421 if (tb[RTA_TABLE])
2422 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2423
2424 if (tb[RTA_MULTIPATH]) {
2425 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2426 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2427 }
2428
2429 err = 0;
2430errout:
2431 return err;
2432}
2433
2434static int ip6_route_multipath(struct fib6_config *cfg, int add)
2435{
2436 struct fib6_config r_cfg;
2437 struct rtnexthop *rtnh;
2438 int remaining;
2439 int attrlen;
2440 int err = 0, last_err = 0;
2441
2442beginning:
2443 rtnh = (struct rtnexthop *)cfg->fc_mp;
2444 remaining = cfg->fc_mp_len;
2445
2446 /* Parse a Multipath Entry */
2447 while (rtnh_ok(rtnh, remaining)) {
2448 memcpy(&r_cfg, cfg, sizeof(*cfg));
2449 if (rtnh->rtnh_ifindex)
2450 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2451
2452 attrlen = rtnh_attrlen(rtnh);
2453 if (attrlen > 0) {
2454 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2455
2456 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2457 if (nla) {
2458 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2459 r_cfg.fc_flags |= RTF_GATEWAY;
2460 }
2461 }
2462 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2463 if (err) {
2464 last_err = err;
			/* If we are trying to remove a route, do not stop
			 * the loop when ip6_route_del() fails (the next hop
			 * may already be gone); try to remove all next hops.
			 */
2469 if (add) {
2470 /* If add fails, we should try to delete all
2471 * next hops that have been already added.
2472 */
2473 add = 0;
2474 goto beginning;
2475 }
2476 }
		/* Because each nexthop is added as a separate route, we
		 * clear this flag after the first nexthop (on a collision
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it).
		 */
2482 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2483 rtnh = rtnh_next(rtnh, &remaining);
2484 }
2485
2486 return last_err;
2487}
2488
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2490{
2491 struct fib6_config cfg;
2492 int err;
2493
2494 err = rtm_to_fib6_config(skb, nlh, &cfg);
2495 if (err < 0)
2496 return err;
2497
2498 if (cfg.fc_mp)
2499 return ip6_route_multipath(&cfg, 0);
2500 else
2501 return ip6_route_del(&cfg);
2502}
2503
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2505{
2506 struct fib6_config cfg;
2507 int err;
2508
2509 err = rtm_to_fib6_config(skb, nlh, &cfg);
2510 if (err < 0)
2511 return err;
2512
2513 if (cfg.fc_mp)
2514 return ip6_route_multipath(&cfg, 1);
2515 else
2516 return ip6_route_add(&cfg);
2517}
2518
2519static inline size_t rt6_nlmsg_size(void)
2520{
2521 return NLMSG_ALIGN(sizeof(struct rtmsg))
2522 + nla_total_size(16) /* RTA_SRC */
2523 + nla_total_size(16) /* RTA_DST */
2524 + nla_total_size(16) /* RTA_GATEWAY */
2525 + nla_total_size(16) /* RTA_PREFSRC */
2526 + nla_total_size(4) /* RTA_TABLE */
2527 + nla_total_size(4) /* RTA_IIF */
2528 + nla_total_size(4) /* RTA_OIF */
2529 + nla_total_size(4) /* RTA_PRIORITY */
2530 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2531 + nla_total_size(sizeof(struct rta_cacheinfo));
2532}
2533
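/*
 * Fill one route message for @rt.  Returns the final message length on
 * success, 1 when a prefix-only dump skips a non-prefix route, and
 * -EMSGSIZE when @skb has run out of room.
 */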
2534static int rt6_fill_node(struct net *net,
2535 struct sk_buff *skb, struct rt6_info *rt,
2536 struct in6_addr *dst, struct in6_addr *src,
2537 int iif, int type, u32 portid, u32 seq,
2538 int prefix, int nowait, unsigned int flags)
2539{
2540 struct rtmsg *rtm;
2541 struct nlmsghdr *nlh;
2542 long expires;
2543 u32 table;
2544
2545 if (prefix) { /* user wants prefix routes only */
2546 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2547 /* success since this is not a prefix route */
2548 return 1;
2549 }
2550 }
2551
2552 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2553 if (!nlh)
2554 return -EMSGSIZE;
2555
2556 rtm = nlmsg_data(nlh);
2557 rtm->rtm_family = AF_INET6;
2558 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2559 rtm->rtm_src_len = rt->rt6i_src.plen;
2560 rtm->rtm_tos = 0;
2561 if (rt->rt6i_table)
2562 table = rt->rt6i_table->tb6_id;
2563 else
2564 table = RT6_TABLE_UNSPEC;
2565 rtm->rtm_table = table;
2566 if (nla_put_u32(skb, RTA_TABLE, table))
2567 goto nla_put_failure;
2568 if (rt->rt6i_flags & RTF_REJECT) {
2569 switch (rt->dst.error) {
2570 case -EINVAL:
2571 rtm->rtm_type = RTN_BLACKHOLE;
2572 break;
2573 case -EACCES:
2574 rtm->rtm_type = RTN_PROHIBIT;
2575 break;
2576 case -EAGAIN:
2577 rtm->rtm_type = RTN_THROW;
2578 break;
2579 default:
2580 rtm->rtm_type = RTN_UNREACHABLE;
2581 break;
2582 }
	} else if (rt->rt6i_flags & RTF_LOCAL)
2585 rtm->rtm_type = RTN_LOCAL;
2586 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2587 rtm->rtm_type = RTN_LOCAL;
2588 else
2589 rtm->rtm_type = RTN_UNICAST;
2590 rtm->rtm_flags = 0;
2591 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2592 rtm->rtm_protocol = rt->rt6i_protocol;
2593 if (rt->rt6i_flags & RTF_DYNAMIC)
2594 rtm->rtm_protocol = RTPROT_REDIRECT;
2595 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2596 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2597 rtm->rtm_protocol = RTPROT_RA;
2598 else
2599 rtm->rtm_protocol = RTPROT_KERNEL;
2600 }
2601
2602 if (rt->rt6i_flags & RTF_CACHE)
2603 rtm->rtm_flags |= RTM_F_CLONED;
2604
2605 if (dst) {
2606 if (nla_put(skb, RTA_DST, 16, dst))
2607 goto nla_put_failure;
2608 rtm->rtm_dst_len = 128;
2609 } else if (rtm->rtm_dst_len)
2610 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2611 goto nla_put_failure;
2612#ifdef CONFIG_IPV6_SUBTREES
2613 if (src) {
2614 if (nla_put(skb, RTA_SRC, 16, src))
2615 goto nla_put_failure;
2616 rtm->rtm_src_len = 128;
2617 } else if (rtm->rtm_src_len &&
2618 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2619 goto nla_put_failure;
2620#endif
2621 if (iif) {
2622#ifdef CONFIG_IPV6_MROUTE
2623 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2624 int err = ip6mr_get_route(net, skb, rtm, nowait);
2625 if (err <= 0) {
2626 if (!nowait) {
2627 if (err == 0)
2628 return 0;
2629 goto nla_put_failure;
2630 } else {
2631 if (err == -EMSGSIZE)
2632 goto nla_put_failure;
2633 }
2634 }
2635 } else
2636#endif
2637 if (nla_put_u32(skb, RTA_IIF, iif))
2638 goto nla_put_failure;
2639 } else if (dst) {
2640 struct in6_addr saddr_buf;
2641 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2642 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2643 goto nla_put_failure;
2644 }
2645
2646 if (rt->rt6i_prefsrc.plen) {
2647 struct in6_addr saddr_buf;
2648 saddr_buf = rt->rt6i_prefsrc.addr;
2649 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2650 goto nla_put_failure;
2651 }
2652
2653 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2654 goto nla_put_failure;
2655
2656 if (rt->rt6i_flags & RTF_GATEWAY) {
2657 if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2658 goto nla_put_failure;
2659 }
2660
2661 if (rt->dst.dev &&
2662 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2663 goto nla_put_failure;
2664 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2665 goto nla_put_failure;
2666
2667 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2668
2669 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2670 goto nla_put_failure;
2671
2672 return nlmsg_end(skb, nlh);
2673
2674nla_put_failure:
2675 nlmsg_cancel(skb, nlh);
2676 return -EMSGSIZE;
2677}
2678
2679int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2680{
2681 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2682 int prefix;
2683
2684 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2685 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2686 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2687 } else
2688 prefix = 0;
2689
2690 return rt6_fill_node(arg->net,
2691 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2692 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2693 prefix, 0, NLM_F_MULTI);
2694}
2695
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2697{
2698 struct net *net = sock_net(in_skb->sk);
2699 struct nlattr *tb[RTA_MAX+1];
2700 struct rt6_info *rt;
2701 struct sk_buff *skb;
2702 struct rtmsg *rtm;
2703 struct flowi6 fl6;
2704 int err, iif = 0, oif = 0;
2705
2706 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2707 if (err < 0)
2708 goto errout;
2709
2710 err = -EINVAL;
2711 memset(&fl6, 0, sizeof(fl6));
2712
2713 if (tb[RTA_SRC]) {
2714 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2715 goto errout;
2716
2717 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2718 }
2719
2720 if (tb[RTA_DST]) {
2721 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2722 goto errout;
2723
2724 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2725 }
2726
2727 if (tb[RTA_IIF])
2728 iif = nla_get_u32(tb[RTA_IIF]);
2729
2730 if (tb[RTA_OIF])
2731 oif = nla_get_u32(tb[RTA_OIF]);
2732
2733 if (tb[RTA_MARK])
2734 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2735
2736 if (iif) {
2737 struct net_device *dev;
2738 int flags = 0;
2739
2740 dev = __dev_get_by_index(net, iif);
2741 if (!dev) {
2742 err = -ENODEV;
2743 goto errout;
2744 }
2745
2746 fl6.flowi6_iif = iif;
2747
2748 if (!ipv6_addr_any(&fl6.saddr))
2749 flags |= RT6_LOOKUP_F_HAS_SADDR;
2750
2751 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2752 flags);
2753 } else {
2754 fl6.flowi6_oif = oif;
2755
2756 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2757 }
2758
2759 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2760 if (!skb) {
2761 ip6_rt_put(rt);
2762 err = -ENOBUFS;
2763 goto errout;
2764 }
2765
	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
2769 skb_reset_mac_header(skb);
2770 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2771
2772 skb_dst_set(skb, &rt->dst);
2773
2774 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2775 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2776 nlh->nlmsg_seq, 0, 0, 0);
2777 if (err < 0) {
2778 kfree_skb(skb);
2779 goto errout;
2780 }
2781
2782 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2783errout:
2784 return err;
2785}
2786
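/*
 * Broadcast a route change to RTNLGRP_IPV6_ROUTE listeners.  The skb
 * is sized with rt6_nlmsg_size(), so a -EMSGSIZE from rt6_fill_node()
 * here would indicate a bug in that size estimate.
 */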
2787void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2788{
2789 struct sk_buff *skb;
2790 struct net *net = info->nl_net;
2791 u32 seq;
2792 int err;
2793
2794 err = -ENOBUFS;
2795 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2796
2797 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2798 if (!skb)
2799 goto errout;
2800
2801 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2802 event, info->portid, seq, 0, 0, 0);
2803 if (err < 0) {
2804 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2805 WARN_ON(err == -EMSGSIZE);
2806 kfree_skb(skb);
2807 goto errout;
2808 }
2809 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2810 info->nlh, gfp_any());
2811 return;
2812errout:
2813 if (err < 0)
2814 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2815}
2816
2817static int ip6_route_dev_notify(struct notifier_block *this,
2818 unsigned long event, void *ptr)
2819{
2820 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2821 struct net *net = dev_net(dev);
2822
2823 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2824 net->ipv6.ip6_null_entry->dst.dev = dev;
2825 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2826#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2827 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2828 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2829 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2830 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2831#endif
2832 }
2833
2834 return NOTIFY_OK;
2835}
2836
2837/*
2838 * /proc
2839 */
2840
2841#ifdef CONFIG_PROC_FS
2842
2843static const struct file_operations ipv6_route_proc_fops = {
2844 .owner = THIS_MODULE,
2845 .open = ipv6_route_open,
2846 .read = seq_read,
2847 .llseek = seq_lseek,
2848 .release = seq_release_net,
2849};
2850
2851static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2852{
2853 struct net *net = (struct net *)seq->private;
2854 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2855 net->ipv6.rt6_stats->fib_nodes,
2856 net->ipv6.rt6_stats->fib_route_nodes,
2857 net->ipv6.rt6_stats->fib_rt_alloc,
2858 net->ipv6.rt6_stats->fib_rt_entries,
2859 net->ipv6.rt6_stats->fib_rt_cache,
2860 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2861 net->ipv6.rt6_stats->fib_discarded_routes);
2862
2863 return 0;
2864}
2865
2866static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2867{
2868 return single_open_net(inode, file, rt6_stats_seq_show);
2869}
2870
2871static const struct file_operations rt6_stats_seq_fops = {
2872 .owner = THIS_MODULE,
2873 .open = rt6_stats_seq_open,
2874 .read = seq_read,
2875 .llseek = seq_lseek,
2876 .release = single_release_net,
2877};
2878#endif /* CONFIG_PROC_FS */
2879
2880#ifdef CONFIG_SYSCTL
2881
2882static
2883int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2884 void __user *buffer, size_t *lenp, loff_t *ppos)
2885{
	struct net *net;
	int delay;
	int err;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	err = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (err)
		return err;
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
2896}
2897
2898struct ctl_table ipv6_route_table_template[] = {
2899 {
2900 .procname = "flush",
2901 .data = &init_net.ipv6.sysctl.flush_delay,
2902 .maxlen = sizeof(int),
2903 .mode = 0200,
2904 .proc_handler = ipv6_sysctl_rtcache_flush
2905 },
2906 {
2907 .procname = "gc_thresh",
2908 .data = &ip6_dst_ops_template.gc_thresh,
2909 .maxlen = sizeof(int),
2910 .mode = 0644,
2911 .proc_handler = proc_dointvec,
2912 },
2913 {
2914 .procname = "max_size",
2915 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2916 .maxlen = sizeof(int),
2917 .mode = 0644,
2918 .proc_handler = proc_dointvec,
2919 },
2920 {
2921 .procname = "gc_min_interval",
2922 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2923 .maxlen = sizeof(int),
2924 .mode = 0644,
2925 .proc_handler = proc_dointvec_jiffies,
2926 },
2927 {
2928 .procname = "gc_timeout",
2929 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2930 .maxlen = sizeof(int),
2931 .mode = 0644,
2932 .proc_handler = proc_dointvec_jiffies,
2933 },
2934 {
2935 .procname = "gc_interval",
2936 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2937 .maxlen = sizeof(int),
2938 .mode = 0644,
2939 .proc_handler = proc_dointvec_jiffies,
2940 },
2941 {
2942 .procname = "gc_elasticity",
2943 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2944 .maxlen = sizeof(int),
2945 .mode = 0644,
2946 .proc_handler = proc_dointvec,
2947 },
2948 {
2949 .procname = "mtu_expires",
2950 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2951 .maxlen = sizeof(int),
2952 .mode = 0644,
2953 .proc_handler = proc_dointvec_jiffies,
2954 },
2955 {
2956 .procname = "min_adv_mss",
2957 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2958 .maxlen = sizeof(int),
2959 .mode = 0644,
2960 .proc_handler = proc_dointvec,
2961 },
2962 {
2963 .procname = "gc_min_interval_ms",
2964 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2965 .maxlen = sizeof(int),
2966 .mode = 0644,
2967 .proc_handler = proc_dointvec_ms_jiffies,
2968 },
2969 { }
2970};
2971
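/*
 * The template above is instantiated per network namespace and appears
 * under /proc/sys/net/ipv6/route/.  Example tuning (illustrative
 * values only):
 *
 *	sysctl -w net.ipv6.route.gc_elasticity=9
 *	sysctl -w net.ipv6.route.max_size=4096
 */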
2972struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2973{
2974 struct ctl_table *table;
2975
2976 table = kmemdup(ipv6_route_table_template,
2977 sizeof(ipv6_route_table_template),
2978 GFP_KERNEL);
2979
2980 if (table) {
2981 table[0].data = &net->ipv6.sysctl.flush_delay;
2982 table[0].extra1 = net;
2983 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2984 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2985 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2986 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2987 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2988 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2989 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2990 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2991 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2992
2993 /* Don't export sysctls to unprivileged users */
2994 if (net->user_ns != &init_user_ns)
2995 table[0].procname = NULL;
2996 }
2997
2998 return table;
2999}
3000#endif
3001
3002static int __net_init ip6_route_net_init(struct net *net)
3003{
3004 int ret = -ENOMEM;
3005
3006 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3007 sizeof(net->ipv6.ip6_dst_ops));
3008
3009 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3010 goto out_ip6_dst_ops;
3011
3012 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3013 sizeof(*net->ipv6.ip6_null_entry),
3014 GFP_KERNEL);
3015 if (!net->ipv6.ip6_null_entry)
3016 goto out_ip6_dst_entries;
3017 net->ipv6.ip6_null_entry->dst.path =
3018 (struct dst_entry *)net->ipv6.ip6_null_entry;
3019 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3020 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3021 ip6_template_metrics, true);
3022
3023#ifdef CONFIG_IPV6_MULTIPLE_TABLES
3024 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3025 sizeof(*net->ipv6.ip6_prohibit_entry),
3026 GFP_KERNEL);
3027 if (!net->ipv6.ip6_prohibit_entry)
3028 goto out_ip6_null_entry;
3029 net->ipv6.ip6_prohibit_entry->dst.path =
3030 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3031 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3032 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3033 ip6_template_metrics, true);
3034
3035 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3036 sizeof(*net->ipv6.ip6_blk_hole_entry),
3037 GFP_KERNEL);
3038 if (!net->ipv6.ip6_blk_hole_entry)
3039 goto out_ip6_prohibit_entry;
3040 net->ipv6.ip6_blk_hole_entry->dst.path =
3041 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3042 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3043 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3044 ip6_template_metrics, true);
3045#endif
3046
3047 net->ipv6.sysctl.flush_delay = 0;
3048 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3049 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3050 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3051 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3052 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3053 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3054 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3055
3056 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3057
3058 ret = 0;
3059out:
3060 return ret;
3061
3062#ifdef CONFIG_IPV6_MULTIPLE_TABLES
3063out_ip6_prohibit_entry:
3064 kfree(net->ipv6.ip6_prohibit_entry);
3065out_ip6_null_entry:
3066 kfree(net->ipv6.ip6_null_entry);
3067#endif
3068out_ip6_dst_entries:
3069 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3070out_ip6_dst_ops:
3071 goto out;
3072}
3073
3074static void __net_exit ip6_route_net_exit(struct net *net)
3075{
3076 kfree(net->ipv6.ip6_null_entry);
3077#ifdef CONFIG_IPV6_MULTIPLE_TABLES
3078 kfree(net->ipv6.ip6_prohibit_entry);
3079 kfree(net->ipv6.ip6_blk_hole_entry);
3080#endif
3081 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3082}
3083
3084static int __net_init ip6_route_net_init_late(struct net *net)
3085{
3086#ifdef CONFIG_PROC_FS
3087 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3088 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3089#endif
3090 return 0;
3091}
3092
3093static void __net_exit ip6_route_net_exit_late(struct net *net)
3094{
3095#ifdef CONFIG_PROC_FS
3096 remove_proc_entry("ipv6_route", net->proc_net);
3097 remove_proc_entry("rt6_stats", net->proc_net);
3098#endif
3099}
3100
3101static struct pernet_operations ip6_route_net_ops = {
3102 .init = ip6_route_net_init,
3103 .exit = ip6_route_net_exit,
3104};
3105
3106static int __net_init ipv6_inetpeer_init(struct net *net)
3107{
3108 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3109
3110 if (!bp)
3111 return -ENOMEM;
3112 inet_peer_base_init(bp);
3113 net->ipv6.peers = bp;
3114 return 0;
3115}
3116
3117static void __net_exit ipv6_inetpeer_exit(struct net *net)
3118{
3119 struct inet_peer_base *bp = net->ipv6.peers;
3120
3121 net->ipv6.peers = NULL;
3122 inetpeer_invalidate_tree(bp);
3123 kfree(bp);
3124}
3125
3126static struct pernet_operations ipv6_inetpeer_ops = {
3127 .init = ipv6_inetpeer_init,
3128 .exit = ipv6_inetpeer_exit,
3129};
3130
3131static struct pernet_operations ip6_route_net_late_ops = {
3132 .init = ip6_route_net_init_late,
3133 .exit = ip6_route_net_exit_late,
3134};
3135
3136static struct notifier_block ip6_route_dev_notifier = {
3137 .notifier_call = ip6_route_dev_notify,
3138 .priority = 0,
3139};
3140
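/*
 * Subsystem bring-up order: dst slab cache, blackhole dst counters,
 * pernet inetpeer and route state, fib6 core, xfrm6, fib6 policy
 * rules, late pernet state (procfs), rtnetlink handlers and finally
 * the netdevice notifier.  The error labels below unwind in exactly
 * the reverse order.
 */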
3141int __init ip6_route_init(void)
3142{
3143 int ret;
3144
3145 ret = -ENOMEM;
3146 ip6_dst_ops_template.kmem_cachep =
3147 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3148 SLAB_HWCACHE_ALIGN, NULL);
3149 if (!ip6_dst_ops_template.kmem_cachep)
3150 goto out;
3151
3152 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3153 if (ret)
3154 goto out_kmem_cache;
3155
3156 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3157 if (ret)
3158 goto out_dst_entries;
3159
3160 ret = register_pernet_subsys(&ip6_route_net_ops);
3161 if (ret)
3162 goto out_register_inetpeer;
3163
3164 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3165
	/* The loopback device is registered before this code runs, so
	 * the loopback reference in rt6_info is not taken automatically;
	 * take it manually for init_net. */
3169 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3170 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
3172 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3173 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3174 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3175 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
3177 ret = fib6_init();
3178 if (ret)
3179 goto out_register_subsys;
3180
3181 ret = xfrm6_init();
3182 if (ret)
3183 goto out_fib6_init;
3184
3185 ret = fib6_rules_init();
3186 if (ret)
3187 goto xfrm6_init;
3188
3189 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3190 if (ret)
3191 goto fib6_rules_init;
3192
3193 ret = -ENOBUFS;
3194 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3195 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3196 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3197 goto out_register_late_subsys;
3198
3199 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3200 if (ret)
3201 goto out_register_late_subsys;
3202
3203out:
3204 return ret;
3205
3206out_register_late_subsys:
3207 unregister_pernet_subsys(&ip6_route_net_late_ops);
3208fib6_rules_init:
3209 fib6_rules_cleanup();
3210xfrm6_init:
3211 xfrm6_fini();
3212out_fib6_init:
3213 fib6_gc_cleanup();
3214out_register_subsys:
3215 unregister_pernet_subsys(&ip6_route_net_ops);
3216out_register_inetpeer:
3217 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3218out_dst_entries:
3219 dst_entries_destroy(&ip6_dst_blackhole_ops);
3220out_kmem_cache:
3221 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3222 goto out;
3223}
3224
3225void ip6_route_cleanup(void)
3226{
3227 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3228 unregister_pernet_subsys(&ip6_route_net_late_ops);
3229 fib6_rules_cleanup();
3230 xfrm6_fini();
3231 fib6_gc_cleanup();
3232 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3233 unregister_pernet_subsys(&ip6_route_net_ops);
3234 dst_entries_destroy(&ip6_dst_blackhole_ops);
3235 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3236}