   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		ROUTE - implementation of the IP router.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
  13 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  14 *
  15 * Fixes:
  16 *		Alan Cox	:	Verify area fixes.
  17 *		Alan Cox	:	cli() protects routing changes
  18 *		Rui Oliveira	:	ICMP routing table updates
  19 *		(rco@di.uminho.pt)	Routing table insertion and update
  20 *		Linus Torvalds	:	Rewrote bits to be sensible
  21 *		Alan Cox	:	Added BSD route gw semantics
  22 *		Alan Cox	:	Super /proc >4K
  23 *		Alan Cox	:	MTU in route table
  24 *		Alan Cox	:	MSS actually. Also added the window
  25 *					clamper.
  26 *		Sam Lantinga	:	Fixed route matching in rt_del()
  27 *		Alan Cox	:	Routing cache support.
  28 *		Alan Cox	:	Removed compatibility cruft.
  29 *		Alan Cox	:	RTF_REJECT support.
  30 *		Alan Cox	:	TCP irtt support.
  31 *		Jonathan Naylor	:	Added Metric support.
  32 *	Miquel van Smoorenburg	:	BSD API fixes.
  33 *	Miquel van Smoorenburg	:	Metrics.
  34 *		Alan Cox	:	Use __u32 properly
  35 *		Alan Cox	:	Aligned routing errors more closely with BSD
   36 *					though our system is still very different.
  37 *		Alan Cox	:	Faster /proc handling
  38 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
  39 *					routing caches and better behaviour.
  40 *
  41 *		Olaf Erb	:	irtt wasn't being copied right.
  42 *		Bjorn Ekwall	:	Kerneld route support.
  43 *		Alan Cox	:	Multicast fixed (I hope)
  44 *		Pavel Krauz	:	Limited broadcast fixed
  45 *		Mike McLagan	:	Routing by source
  46 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
  47 *					route.c and rewritten from scratch.
  48 *		Andi Kleen	:	Load-limit warning messages.
  49 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  50 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
  51 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
  52 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
  53 *		Marc Boucher	:	routing by fwmark
  54 *	Robert Olsson		:	Added rt_cache statistics
  55 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
  56 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
  57 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
  58 *	Ilia Sotnikov		:	Removed TOS from hash calculations
  59 */
  60
  61#define pr_fmt(fmt) "IPv4: " fmt
  62
  63#include <linux/module.h>
  64#include <linux/bitops.h>
  65#include <linux/kernel.h>
  66#include <linux/mm.h>
  67#include <linux/memblock.h>
  68#include <linux/socket.h>
  69#include <linux/errno.h>
  70#include <linux/in.h>
  71#include <linux/inet.h>
  72#include <linux/netdevice.h>
  73#include <linux/proc_fs.h>
  74#include <linux/init.h>
  75#include <linux/skbuff.h>
  76#include <linux/inetdevice.h>
  77#include <linux/igmp.h>
  78#include <linux/pkt_sched.h>
  79#include <linux/mroute.h>
  80#include <linux/netfilter_ipv4.h>
  81#include <linux/random.h>
  82#include <linux/rcupdate.h>
  83#include <linux/slab.h>
  84#include <linux/jhash.h>
  85#include <net/dst.h>
  86#include <net/dst_metadata.h>
  87#include <net/inet_dscp.h>
  88#include <net/net_namespace.h>
  89#include <net/ip.h>
  90#include <net/route.h>
  91#include <net/inetpeer.h>
  92#include <net/sock.h>
  93#include <net/ip_fib.h>
  94#include <net/nexthop.h>
  95#include <net/tcp.h>
  96#include <net/icmp.h>
  97#include <net/xfrm.h>
  98#include <net/lwtunnel.h>
  99#include <net/netevent.h>
 100#include <net/rtnetlink.h>
 101#ifdef CONFIG_SYSCTL
 102#include <linux/sysctl.h>
 103#endif
 104#include <net/secure_seq.h>
 105#include <net/ip_tunnels.h>
 106
 107#include "fib_lookup.h"
 108
 109#define RT_GC_TIMEOUT (300*HZ)
 110
 111#define DEFAULT_MIN_PMTU (512 + 20 + 20)
 112#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
 113#define DEFAULT_MIN_ADVMSS 256
 114static int ip_rt_max_size;
 115static int ip_rt_redirect_number __read_mostly	= 9;
 116static int ip_rt_redirect_load __read_mostly	= HZ / 50;
 117static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
 118static int ip_rt_error_cost __read_mostly	= HZ;
 119static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 120
 121static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
 122
 123/*
 124 *	Interface to generic destination cache.
 125 */
 126
 127INDIRECT_CALLABLE_SCOPE
 128struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 129static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
 130INDIRECT_CALLABLE_SCOPE
 131unsigned int		ipv4_mtu(const struct dst_entry *dst);
 132static void		ipv4_negative_advice(struct sock *sk,
 133					     struct dst_entry *dst);
 134static void		 ipv4_link_failure(struct sk_buff *skb);
 135static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 136					   struct sk_buff *skb, u32 mtu,
 137					   bool confirm_neigh);
 138static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
 139					struct sk_buff *skb);
 140static void		ipv4_dst_destroy(struct dst_entry *dst);
 141
 142static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 143{
 144	WARN_ON(1);
 145	return NULL;
 146}
 147
 148static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 149					   struct sk_buff *skb,
 150					   const void *daddr);
 151static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
 152
 153static struct dst_ops ipv4_dst_ops = {
 154	.family =		AF_INET,
 155	.check =		ipv4_dst_check,
 156	.default_advmss =	ipv4_default_advmss,
 157	.mtu =			ipv4_mtu,
 158	.cow_metrics =		ipv4_cow_metrics,
 159	.destroy =		ipv4_dst_destroy,
 160	.negative_advice =	ipv4_negative_advice,
 161	.link_failure =		ipv4_link_failure,
 162	.update_pmtu =		ip_rt_update_pmtu,
 163	.redirect =		ip_do_redirect,
 164	.local_out =		__ip_local_out,
 165	.neigh_lookup =		ipv4_neigh_lookup,
 166	.confirm_neigh =	ipv4_confirm_neigh,
 167};
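/* These ops are reached through the generic dst_entry; a simplified
 * dispatch sketch (adapted from include/net/dst.h):
 *
 *	static inline u32 dst_mtu(const struct dst_entry *dst)
 *	{
 *		return dst->ops->mtu(dst);	// ipv4_mtu() for these dsts
 *	}
 */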
 168
 169#define ECN_OR_COST(class)	TC_PRIO_##class
 170
 171const __u8 ip_tos2prio[16] = {
 172	TC_PRIO_BESTEFFORT,
 173	ECN_OR_COST(BESTEFFORT),
 174	TC_PRIO_BESTEFFORT,
 175	ECN_OR_COST(BESTEFFORT),
 176	TC_PRIO_BULK,
 177	ECN_OR_COST(BULK),
 178	TC_PRIO_BULK,
 179	ECN_OR_COST(BULK),
 180	TC_PRIO_INTERACTIVE,
 181	ECN_OR_COST(INTERACTIVE),
 182	TC_PRIO_INTERACTIVE,
 183	ECN_OR_COST(INTERACTIVE),
 184	TC_PRIO_INTERACTIVE_BULK,
 185	ECN_OR_COST(INTERACTIVE_BULK),
 186	TC_PRIO_INTERACTIVE_BULK,
 187	ECN_OR_COST(INTERACTIVE_BULK)
 188};
 189EXPORT_SYMBOL(ip_tos2prio);
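/* Lookup sketch: rt_tos2priority() in include/net/route.h indexes this
 * table as ip_tos2prio[IPTOS_TOS(tos) >> 1], i.e. by the four legacy
 * TOS bits. For example IPTOS_THROUGHPUT (0x08) selects TC_PRIO_BULK,
 * while the ECN_OR_COST() entries keep the low "minimize cost"/ECN bit
 * from changing the chosen class.
 */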
 190
 191static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 192#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
 193
 194#ifdef CONFIG_PROC_FS
 195static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 196{
 197	if (*pos)
 198		return NULL;
 199	return SEQ_START_TOKEN;
 200}
 201
 202static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 203{
 204	++*pos;
 205	return NULL;
 206}
 207
 208static void rt_cache_seq_stop(struct seq_file *seq, void *v)
 209{
 210}
 211
 212static int rt_cache_seq_show(struct seq_file *seq, void *v)
 213{
 214	if (v == SEQ_START_TOKEN)
 215		seq_printf(seq, "%-127s\n",
 216			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
 217			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
 218			   "HHUptod\tSpecDst");
 219	return 0;
 220}
 221
 222static const struct seq_operations rt_cache_seq_ops = {
 223	.start  = rt_cache_seq_start,
 224	.next   = rt_cache_seq_next,
 225	.stop   = rt_cache_seq_stop,
 226	.show   = rt_cache_seq_show,
 227};
 228
 229static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 230{
 231	int cpu;
 232
 233	if (*pos == 0)
 234		return SEQ_START_TOKEN;
 235
 236	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 237		if (!cpu_possible(cpu))
 238			continue;
 239		*pos = cpu+1;
 240		return &per_cpu(rt_cache_stat, cpu);
 241	}
 242	return NULL;
 243}
 244
 245static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 246{
 247	int cpu;
 248
 249	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 250		if (!cpu_possible(cpu))
 251			continue;
 252		*pos = cpu+1;
 253		return &per_cpu(rt_cache_stat, cpu);
 254	}
 255	(*pos)++;
 256	return NULL;
 257
 258}
 259
 260static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
 261{
 262
 263}
 264
 265static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 266{
 267	struct rt_cache_stat *st = v;
 268
 269	if (v == SEQ_START_TOKEN) {
 270		seq_puts(seq, "entries  in_hit   in_slow_tot in_slow_mc in_no_route in_brd   in_martian_dst in_martian_src out_hit  out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 271		return 0;
 272	}
 273
 274	seq_printf(seq, "%08x %08x %08x    %08x   %08x    %08x %08x       "
 275			"%08x       %08x %08x     %08x    %08x %08x   "
 276			"%08x     %08x        %08x        %08x\n",
 277		   dst_entries_get_slow(&ipv4_dst_ops),
 278		   0, /* st->in_hit */
 279		   st->in_slow_tot,
 280		   st->in_slow_mc,
 281		   st->in_no_route,
 282		   st->in_brd,
 283		   st->in_martian_dst,
 284		   st->in_martian_src,
 285
 286		   0, /* st->out_hit */
 287		   st->out_slow_tot,
 288		   st->out_slow_mc,
 289
 290		   0, /* st->gc_total */
 291		   0, /* st->gc_ignored */
 292		   0, /* st->gc_goal_miss */
 293		   0, /* st->gc_dst_overflow */
 294		   0, /* st->in_hlist_search */
 295		   0  /* st->out_hlist_search */
 296		);
 297	return 0;
 298}
 299
 300static const struct seq_operations rt_cpu_seq_ops = {
 301	.start  = rt_cpu_seq_start,
 302	.next   = rt_cpu_seq_next,
 303	.stop   = rt_cpu_seq_stop,
 304	.show   = rt_cpu_seq_show,
 305};
 306
 307#ifdef CONFIG_IP_ROUTE_CLASSID
 308static int rt_acct_proc_show(struct seq_file *m, void *v)
 309{
 310	struct ip_rt_acct *dst, *src;
 311	unsigned int i, j;
 312
 313	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
 314	if (!dst)
 315		return -ENOMEM;
 316
 317	for_each_possible_cpu(i) {
 318		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
 319		for (j = 0; j < 256; j++) {
 320			dst[j].o_bytes   += src[j].o_bytes;
 321			dst[j].o_packets += src[j].o_packets;
 322			dst[j].i_bytes   += src[j].i_bytes;
 323			dst[j].i_packets += src[j].i_packets;
 324		}
 325	}
 326
 327	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
 328	kfree(dst);
 329	return 0;
 330}
 331#endif
 332
 333static int __net_init ip_rt_do_proc_init(struct net *net)
 334{
 335	struct proc_dir_entry *pde;
 336
 337	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
 338			      &rt_cache_seq_ops);
 339	if (!pde)
 340		goto err1;
 341
 342	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
 343			      &rt_cpu_seq_ops);
 344	if (!pde)
 345		goto err2;
 346
 347#ifdef CONFIG_IP_ROUTE_CLASSID
 348	pde = proc_create_single("rt_acct", 0, net->proc_net,
 349			rt_acct_proc_show);
 350	if (!pde)
 351		goto err3;
 352#endif
 353	return 0;
 354
 355#ifdef CONFIG_IP_ROUTE_CLASSID
 356err3:
 357	remove_proc_entry("rt_cache", net->proc_net_stat);
 358#endif
 359err2:
 360	remove_proc_entry("rt_cache", net->proc_net);
 361err1:
 362	return -ENOMEM;
 363}
 364
 365static void __net_exit ip_rt_do_proc_exit(struct net *net)
 366{
 367	remove_proc_entry("rt_cache", net->proc_net_stat);
 368	remove_proc_entry("rt_cache", net->proc_net);
 369#ifdef CONFIG_IP_ROUTE_CLASSID
 370	remove_proc_entry("rt_acct", net->proc_net);
 371#endif
 372}
 373
 374static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
 375	.init = ip_rt_do_proc_init,
 376	.exit = ip_rt_do_proc_exit,
 377};
 378
 379static int __init ip_rt_proc_init(void)
 380{
 381	return register_pernet_subsys(&ip_rt_proc_ops);
 382}
 383
 384#else
 385static inline int ip_rt_proc_init(void)
 386{
 387	return 0;
 388}
 389#endif /* CONFIG_PROC_FS */
 390
 391static inline bool rt_is_expired(const struct rtable *rth)
 392{
 393	bool res;
 394
 395	rcu_read_lock();
 396	res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
 397	rcu_read_unlock();
 398
 399	return res;
 400}
 401
 402void rt_cache_flush(struct net *net)
 403{
 404	rt_genid_bump_ipv4(net);
 405}
 406
 407static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 408					   struct sk_buff *skb,
 409					   const void *daddr)
 410{
 411	const struct rtable *rt = container_of(dst, struct rtable, dst);
 412	struct net_device *dev = dst->dev;
 413	struct neighbour *n;
 414
 415	rcu_read_lock();
 416
 417	if (likely(rt->rt_gw_family == AF_INET)) {
 418		n = ip_neigh_gw4(dev, rt->rt_gw4);
 419	} else if (rt->rt_gw_family == AF_INET6) {
 420		n = ip_neigh_gw6(dev, &rt->rt_gw6);
  421	} else {
 422		__be32 pkey;
 423
 424		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
 425		n = ip_neigh_gw4(dev, pkey);
 426	}
 427
 428	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
 429		n = NULL;
 430
 431	rcu_read_unlock();
 432
 433	return n;
 434}
 435
 436static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 437{
 438	const struct rtable *rt = container_of(dst, struct rtable, dst);
 439	struct net_device *dev = dst->dev;
 440	const __be32 *pkey = daddr;
 441
 442	if (rt->rt_gw_family == AF_INET) {
 443		pkey = (const __be32 *)&rt->rt_gw4;
 444	} else if (rt->rt_gw_family == AF_INET6) {
 445		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
 446	} else if (!daddr ||
 447		 (rt->rt_flags &
 448		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
 449		return;
 450	}
 451	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
 452}
 453
 454/* Hash tables of size 2048..262144 depending on RAM size.
 455 * Each bucket uses 8 bytes.
 456 */
 457static u32 ip_idents_mask __read_mostly;
 458static atomic_t *ip_idents __read_mostly;
 459static u32 *ip_tstamps __read_mostly;
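/* Sizing example (assuming 4-byte atomic_t and u32): at the 262144-bucket
 * maximum, the idents and tstamps arrays together consume 2 MB.
 */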
 460
 461/* In order to protect privacy, we add a perturbation to identifiers
  462 * if one generator is seldom used. This makes it hard for an attacker
 463 * to infer how many packets were sent between two points in time.
 464 */
 465static u32 ip_idents_reserve(u32 hash, int segs)
 466{
 467	u32 bucket, old, now = (u32)jiffies;
 468	atomic_t *p_id;
 469	u32 *p_tstamp;
 470	u32 delta = 0;
 471
 472	bucket = hash & ip_idents_mask;
 473	p_tstamp = ip_tstamps + bucket;
 474	p_id = ip_idents + bucket;
 475	old = READ_ONCE(*p_tstamp);
 476
 477	if (old != now && cmpxchg(p_tstamp, old, now) == old)
 478		delta = get_random_u32_below(now - old);
 479
  480	/* If UBSAN reports an error here, please make sure your compiler
  481	 * supports -fno-strict-overflow before reporting it; that was a bug
  482	 * in UBSAN, and it has been fixed in GCC-8.
 483	 */
 484	return atomic_add_return(segs + delta, p_id) - segs;
 485}
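/* Walk-through with hypothetical numbers: if this bucket was last used
 * 100 jiffies ago and we reserve segs = 3 ids, delta is drawn from
 * [0, 100), so the returned ids may skip ahead by up to 99 slots.
 * An observer probing the counter twice can then no longer deduce how
 * many packets were sent between the two probes.
 */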
 486
 487void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 488{
 489	u32 hash, id;
 490
 491	/* Note the following code is not safe, but this is okay. */
 492	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
 493		get_random_bytes(&net->ipv4.ip_id_key,
 494				 sizeof(net->ipv4.ip_id_key));
 495
 496	hash = siphash_3u32((__force u32)iph->daddr,
 497			    (__force u32)iph->saddr,
 498			    iph->protocol,
 499			    &net->ipv4.ip_id_key);
 500	id = ip_idents_reserve(hash, segs);
 501	iph->id = htons(id);
 502}
 503EXPORT_SYMBOL(__ip_select_ident);
 504
 505static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
 506			     const struct sock *sk, const struct iphdr *iph,
 507			     int oif, __u8 tos, u8 prot, u32 mark,
 508			     int flow_flags)
 509{
 510	__u8 scope = RT_SCOPE_UNIVERSE;
 511
 512	if (sk) {
 513		oif = sk->sk_bound_dev_if;
 514		mark = READ_ONCE(sk->sk_mark);
 515		tos = ip_sock_rt_tos(sk);
 516		scope = ip_sock_rt_scope(sk);
 517		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
 518						    sk->sk_protocol;
 519	}
 520
 521	flowi4_init_output(fl4, oif, mark, tos & INET_DSCP_MASK, scope,
 522			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
 523			   sock_net_uid(net, sk));
 524}
 525
 526static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 527			       const struct sock *sk)
 528{
 529	const struct net *net = dev_net(skb->dev);
 530	const struct iphdr *iph = ip_hdr(skb);
 531	int oif = skb->dev->ifindex;
 532	u8 prot = iph->protocol;
 533	u32 mark = skb->mark;
 534	__u8 tos = iph->tos;
 535
 536	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
 537}
 538
 539static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
 540{
 541	const struct inet_sock *inet = inet_sk(sk);
 542	const struct ip_options_rcu *inet_opt;
 543	__be32 daddr = inet->inet_daddr;
 544
 545	rcu_read_lock();
 546	inet_opt = rcu_dereference(inet->inet_opt);
 547	if (inet_opt && inet_opt->opt.srr)
 548		daddr = inet_opt->opt.faddr;
 549	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
 550			   ip_sock_rt_tos(sk),
 551			   ip_sock_rt_scope(sk),
 552			   inet_test_bit(HDRINCL, sk) ?
 553				IPPROTO_RAW : sk->sk_protocol,
 554			   inet_sk_flowi_flags(sk),
 555			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
 556	rcu_read_unlock();
 557}
 558
 559static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 560				 const struct sk_buff *skb)
 561{
 562	if (skb)
 563		build_skb_flow_key(fl4, skb, sk);
 564	else
 565		build_sk_flow_key(fl4, sk);
 566}
 567
 568static DEFINE_SPINLOCK(fnhe_lock);
 569
 570static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
 571{
 572	struct rtable *rt;
 573
 574	rt = rcu_dereference(fnhe->fnhe_rth_input);
 575	if (rt) {
 576		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
 577		dst_dev_put(&rt->dst);
 578		dst_release(&rt->dst);
 579	}
 580	rt = rcu_dereference(fnhe->fnhe_rth_output);
 581	if (rt) {
 582		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
 583		dst_dev_put(&rt->dst);
 584		dst_release(&rt->dst);
 585	}
 586}
 587
 588static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
 589{
 590	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
 591	struct fib_nh_exception *fnhe, *oldest = NULL;
 592
 593	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
 594		fnhe = rcu_dereference_protected(*fnhe_p,
 595						 lockdep_is_held(&fnhe_lock));
 596		if (!fnhe)
 597			break;
 598		if (!oldest ||
 599		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
 600			oldest = fnhe;
 601			oldest_p = fnhe_p;
 602		}
 603	}
 604	fnhe_flush_routes(oldest);
 605	*oldest_p = oldest->fnhe_next;
 606	kfree_rcu(oldest, rcu);
 607}
 608
 609static u32 fnhe_hashfun(__be32 daddr)
 610{
 611	static siphash_aligned_key_t fnhe_hash_key;
 612	u64 hval;
 613
 614	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
 615	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
 616	return hash_64(hval, FNHE_HASH_SHIFT);
 617}
 618
 619static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
 620{
 621	rt->rt_pmtu = fnhe->fnhe_pmtu;
 622	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
 623	rt->dst.expires = fnhe->fnhe_expires;
 624
 625	if (fnhe->fnhe_gw) {
 626		rt->rt_flags |= RTCF_REDIRECTED;
 627		rt->rt_uses_gateway = 1;
 628		rt->rt_gw_family = AF_INET;
 629		rt->rt_gw4 = fnhe->fnhe_gw;
 630	}
 631}
 632
 633static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
 634				  __be32 gw, u32 pmtu, bool lock,
 635				  unsigned long expires)
 636{
 637	struct fnhe_hash_bucket *hash;
 638	struct fib_nh_exception *fnhe;
 639	struct rtable *rt;
 640	u32 genid, hval;
 641	unsigned int i;
 642	int depth;
 643
 644	genid = fnhe_genid(dev_net(nhc->nhc_dev));
 645	hval = fnhe_hashfun(daddr);
 646
 647	spin_lock_bh(&fnhe_lock);
 648
 649	hash = rcu_dereference(nhc->nhc_exceptions);
 650	if (!hash) {
 651		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
 652		if (!hash)
 653			goto out_unlock;
 654		rcu_assign_pointer(nhc->nhc_exceptions, hash);
 655	}
 656
 657	hash += hval;
 658
 659	depth = 0;
 660	for (fnhe = rcu_dereference(hash->chain); fnhe;
 661	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
 662		if (fnhe->fnhe_daddr == daddr)
 663			break;
 664		depth++;
 665	}
 666
 667	if (fnhe) {
 668		if (fnhe->fnhe_genid != genid)
 669			fnhe->fnhe_genid = genid;
 670		if (gw)
 671			fnhe->fnhe_gw = gw;
 672		if (pmtu) {
 673			fnhe->fnhe_pmtu = pmtu;
 674			fnhe->fnhe_mtu_locked = lock;
 675		}
 676		fnhe->fnhe_expires = max(1UL, expires);
 677		/* Update all cached dsts too */
 678		rt = rcu_dereference(fnhe->fnhe_rth_input);
 679		if (rt)
 680			fill_route_from_fnhe(rt, fnhe);
 681		rt = rcu_dereference(fnhe->fnhe_rth_output);
 682		if (rt)
 683			fill_route_from_fnhe(rt, fnhe);
 684	} else {
  685		/* Randomize max depth to avoid some side-channel attacks. */
 686		int max_depth = FNHE_RECLAIM_DEPTH +
 687				get_random_u32_below(FNHE_RECLAIM_DEPTH);
 688
 689		while (depth > max_depth) {
 690			fnhe_remove_oldest(hash);
 691			depth--;
 692		}
 693
 694		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
 695		if (!fnhe)
 696			goto out_unlock;
 697
 698		fnhe->fnhe_next = hash->chain;
 699
 700		fnhe->fnhe_genid = genid;
 701		fnhe->fnhe_daddr = daddr;
 702		fnhe->fnhe_gw = gw;
 703		fnhe->fnhe_pmtu = pmtu;
 704		fnhe->fnhe_mtu_locked = lock;
 705		fnhe->fnhe_expires = max(1UL, expires);
 706
 707		rcu_assign_pointer(hash->chain, fnhe);
 708
 709		/* Exception created; mark the cached routes for the nexthop
 710		 * stale, so anyone caching it rechecks if this exception
 711		 * applies to them.
 712		 */
 713		rt = rcu_dereference(nhc->nhc_rth_input);
 714		if (rt)
 715			rt->dst.obsolete = DST_OBSOLETE_KILL;
 716
 717		for_each_possible_cpu(i) {
 718			struct rtable __rcu **prt;
 719
 720			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
 721			rt = rcu_dereference(*prt);
 722			if (rt)
 723				rt->dst.obsolete = DST_OBSOLETE_KILL;
 724		}
 725	}
 726
 727	fnhe->fnhe_stamp = jiffies;
 728
 729out_unlock:
 730	spin_unlock_bh(&fnhe_lock);
 731}
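/* Chain-depth sketch (assuming FNHE_RECLAIM_DEPTH = 5): a bucket chain
 * may grow to anywhere in [5, 10) entries before fnhe_remove_oldest()
 * starts evicting, so the eviction threshold itself is not a reliable
 * signal for a probing attacker.
 */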
 732
 733static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
 734			     bool kill_route)
 735{
 736	__be32 new_gw = icmp_hdr(skb)->un.gateway;
 737	__be32 old_gw = ip_hdr(skb)->saddr;
 738	struct net_device *dev = skb->dev;
 739	struct in_device *in_dev;
 740	struct fib_result res;
 741	struct neighbour *n;
 742	struct net *net;
 743
 744	switch (icmp_hdr(skb)->code & 7) {
 745	case ICMP_REDIR_NET:
 746	case ICMP_REDIR_NETTOS:
 747	case ICMP_REDIR_HOST:
 748	case ICMP_REDIR_HOSTTOS:
 749		break;
 750
 751	default:
 752		return;
 753	}
 754
 755	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
 756		return;
 757
 758	in_dev = __in_dev_get_rcu(dev);
 759	if (!in_dev)
 760		return;
 761
 762	net = dev_net(dev);
 763	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
 764	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
 765	    ipv4_is_zeronet(new_gw))
 766		goto reject_redirect;
 767
 768	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
 769		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
 770			goto reject_redirect;
 771		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
 772			goto reject_redirect;
 773	} else {
 774		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
 775			goto reject_redirect;
 776	}
 777
 778	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
 779	if (!n)
 780		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 781	if (!IS_ERR(n)) {
 782		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
 783			neigh_event_send(n, NULL);
 784		} else {
 785			if (fib_lookup(net, fl4, &res, 0) == 0) {
 786				struct fib_nh_common *nhc;
 787
 788				fib_select_path(net, &res, fl4, skb);
 789				nhc = FIB_RES_NHC(res);
 790				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
 791						0, false,
 792						jiffies + ip_rt_gc_timeout);
 793			}
 794			if (kill_route)
 795				rt->dst.obsolete = DST_OBSOLETE_KILL;
 796			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 797		}
 798		neigh_release(n);
 799	}
 800	return;
 801
 802reject_redirect:
 803#ifdef CONFIG_IP_ROUTE_VERBOSE
 804	if (IN_DEV_LOG_MARTIANS(in_dev)) {
 805		const struct iphdr *iph = (const struct iphdr *) skb->data;
 806		__be32 daddr = iph->daddr;
 807		__be32 saddr = iph->saddr;
 808
 809		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
 810				     "  Advised path = %pI4 -> %pI4\n",
 811				     &old_gw, dev->name, &new_gw,
 812				     &saddr, &daddr);
 813	}
 814#endif
 815	;
 816}
 817
 818static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 819{
 820	struct rtable *rt;
 821	struct flowi4 fl4;
 822	const struct iphdr *iph = (const struct iphdr *) skb->data;
 823	struct net *net = dev_net(skb->dev);
 824	int oif = skb->dev->ifindex;
 825	u8 prot = iph->protocol;
 826	u32 mark = skb->mark;
 827	__u8 tos = iph->tos;
 828
 829	rt = dst_rtable(dst);
 830
 831	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
 832	__ip_do_redirect(rt, skb, &fl4, true);
 833}
 834
 835static void ipv4_negative_advice(struct sock *sk,
 836				 struct dst_entry *dst)
 837{
 838	struct rtable *rt = dst_rtable(dst);
 839
 840	if ((dst->obsolete > 0) ||
 841	    (rt->rt_flags & RTCF_REDIRECTED) ||
 842	    rt->dst.expires)
 843		sk_dst_reset(sk);
 844}
 845
 846/*
 847 * Algorithm:
 848 *	1. The first ip_rt_redirect_number redirects are sent
 849 *	   with exponential backoff, then we stop sending them at all,
 850 *	   assuming that the host ignores our redirects.
 851 *	2. If we did not see packets requiring redirects
 852 *	   during ip_rt_redirect_silence, we assume that the host
  853 *		   forgot the redirected route and start sending redirects again.
 854 *
 855 * This algorithm is much cheaper and more intelligent than dumb load limiting
 856 * in icmp.c.
 857 *
 858 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 859 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 860 */
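/* Worked example with the defaults above (assuming HZ = 100):
 * ip_rt_redirect_load is 2 jiffies, so redirect k is gated behind
 * rate_last + (2 << k), i.e. 2, 4, 8, ... jiffies of backoff, and after
 * ip_rt_redirect_number (9) ignored redirects we go silent until
 * ip_rt_redirect_silence, (HZ/50) << 10 = 2048 jiffies (~20 s), passes.
 */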
 861
 862void ip_rt_send_redirect(struct sk_buff *skb)
 863{
 864	struct rtable *rt = skb_rtable(skb);
 865	struct in_device *in_dev;
 866	struct inet_peer *peer;
 867	struct net *net;
 868	int log_martians;
 869	int vif;
 870
 871	rcu_read_lock();
 872	in_dev = __in_dev_get_rcu(rt->dst.dev);
 873	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 874		rcu_read_unlock();
 875		return;
 876	}
 877	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 878	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
 879
 880	net = dev_net(rt->dst.dev);
 881	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
 882	if (!peer) {
 883		rcu_read_unlock();
 884		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
 885			  rt_nexthop(rt, ip_hdr(skb)->daddr));
 886		return;
 887	}
 888
 889	/* No redirected packets during ip_rt_redirect_silence;
 890	 * reset the algorithm.
 891	 */
 892	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 893		peer->rate_tokens = 0;
 894		peer->n_redirects = 0;
 895	}
 896
  897	/* Too many ignored redirects; do not send anything and
  898	 * set peer->rate_last to the last seen redirected packet.
 899	 */
 900	if (peer->n_redirects >= ip_rt_redirect_number) {
 901		peer->rate_last = jiffies;
 902		goto out_unlock;
 903	}
 904
 905	/* Check for load limit; set rate_last to the latest sent
 906	 * redirect.
 907	 */
 908	if (peer->n_redirects == 0 ||
 909	    time_after(jiffies,
 910		       (peer->rate_last +
 911			(ip_rt_redirect_load << peer->n_redirects)))) {
 912		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 913
 914		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 915		peer->rate_last = jiffies;
 916		++peer->n_redirects;
 917		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
 918		    peer->n_redirects == ip_rt_redirect_number)
 919			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 920					     &ip_hdr(skb)->saddr, inet_iif(skb),
 921					     &ip_hdr(skb)->daddr, &gw);
 922	}
 923out_unlock:
 924	rcu_read_unlock();
 925}
 926
 927static int ip_error(struct sk_buff *skb)
 928{
 929	struct rtable *rt = skb_rtable(skb);
 930	struct net_device *dev = skb->dev;
 931	struct in_device *in_dev;
 932	struct inet_peer *peer;
 933	unsigned long now;
 934	struct net *net;
 935	SKB_DR(reason);
 936	bool send;
 937	int code;
 938
 939	if (netif_is_l3_master(skb->dev)) {
 940		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
 941		if (!dev)
 942			goto out;
 943	}
 944
 945	in_dev = __in_dev_get_rcu(dev);
 946
 947	/* IP on this device is disabled. */
 948	if (!in_dev)
 949		goto out;
 950
 951	net = dev_net(rt->dst.dev);
 952	if (!IN_DEV_FORWARD(in_dev)) {
 953		switch (rt->dst.error) {
 954		case EHOSTUNREACH:
 955			SKB_DR_SET(reason, IP_INADDRERRORS);
 956			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 957			break;
 958
 959		case ENETUNREACH:
 960			SKB_DR_SET(reason, IP_INNOROUTES);
 961			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 962			break;
 963		}
 964		goto out;
 965	}
 966
 967	switch (rt->dst.error) {
 968	case EINVAL:
 969	default:
 970		goto out;
 971	case EHOSTUNREACH:
 972		code = ICMP_HOST_UNREACH;
 973		break;
 974	case ENETUNREACH:
 975		code = ICMP_NET_UNREACH;
 976		SKB_DR_SET(reason, IP_INNOROUTES);
 977		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 978		break;
 979	case EACCES:
 980		code = ICMP_PKT_FILTERED;
 981		break;
 982	}
 983
 984	rcu_read_lock();
 985	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
 986			       l3mdev_master_ifindex_rcu(skb->dev));
 987	send = true;
 988	if (peer) {
 989		now = jiffies;
 990		peer->rate_tokens += now - peer->rate_last;
 991		if (peer->rate_tokens > ip_rt_error_burst)
 992			peer->rate_tokens = ip_rt_error_burst;
 993		peer->rate_last = now;
 994		if (peer->rate_tokens >= ip_rt_error_cost)
 995			peer->rate_tokens -= ip_rt_error_cost;
 996		else
 997			send = false;
 998	}
 999	rcu_read_unlock();
1000
1001	if (send)
1002		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1003
1004out:	kfree_skb_reason(skb, reason);
1005	return 0;
1006}
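/* Token-bucket summary of the rate limit above, using the defaults:
 * tokens accrue one per jiffy up to ip_rt_error_burst (5 * HZ) and each
 * ICMP error costs ip_rt_error_cost (HZ) tokens, i.e. roughly one
 * destination-unreachable per second with bursts of up to five.
 */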
1007
1008static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1009{
1010	struct dst_entry *dst = &rt->dst;
1011	struct fib_result res;
1012	bool lock = false;
1013	struct net *net;
1014	u32 old_mtu;
1015
1016	if (ip_mtu_locked(dst))
1017		return;
1018
1019	old_mtu = ipv4_mtu(dst);
1020	if (old_mtu < mtu)
1021		return;
1022
1023	rcu_read_lock();
1024	net = dev_net_rcu(dst->dev);
1025	if (mtu < net->ipv4.ip_rt_min_pmtu) {
1026		lock = true;
1027		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
1028	}
1029
1030	if (rt->rt_pmtu == mtu && !lock &&
1031	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
1032		goto out;
1033
1034	if (fib_lookup(net, fl4, &res, 0) == 0) {
1035		struct fib_nh_common *nhc;
1036
1037		fib_select_path(net, &res, fl4, NULL);
1038#ifdef CONFIG_IP_ROUTE_MULTIPATH
1039		if (fib_info_num_path(res.fi) > 1) {
1040			int nhsel;
1041
1042			for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) {
1043				nhc = fib_info_nhc(res.fi, nhsel);
1044				update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1045						      jiffies + net->ipv4.ip_rt_mtu_expires);
1046			}
1047			goto out;
1048		}
1049#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1050		nhc = FIB_RES_NHC(res);
1051		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1052				      jiffies + net->ipv4.ip_rt_mtu_expires);
1053	}
1054out:
1055	rcu_read_unlock();
1056}
1057
1058static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1059			      struct sk_buff *skb, u32 mtu,
1060			      bool confirm_neigh)
1061{
1062	struct rtable *rt = dst_rtable(dst);
1063	struct flowi4 fl4;
1064
1065	ip_rt_build_flow_key(&fl4, sk, skb);
1066
1067	/* Don't make lookup fail for bridged encapsulations */
1068	if (skb && netif_is_any_bridge_port(skb->dev))
1069		fl4.flowi4_oif = 0;
1070
1071	__ip_rt_update_pmtu(rt, &fl4, mtu);
1072}
1073
1074void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1075		      int oif, u8 protocol)
1076{
1077	const struct iphdr *iph = (const struct iphdr *)skb->data;
1078	struct flowi4 fl4;
1079	struct rtable *rt;
1080	u32 mark = IP4_REPLY_MARK(net, skb->mark);
1081
1082	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
1083			 0);
1084	rt = __ip_route_output_key(net, &fl4);
1085	if (!IS_ERR(rt)) {
1086		__ip_rt_update_pmtu(rt, &fl4, mtu);
1087		ip_rt_put(rt);
1088	}
1089}
1090EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1091
1092static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1093{
1094	const struct iphdr *iph = (const struct iphdr *)skb->data;
1095	struct flowi4 fl4;
1096	struct rtable *rt;
1097
1098	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1099
1100	if (!fl4.flowi4_mark)
1101		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1102
1103	rt = __ip_route_output_key(sock_net(sk), &fl4);
1104	if (!IS_ERR(rt)) {
1105		__ip_rt_update_pmtu(rt, &fl4, mtu);
1106		ip_rt_put(rt);
1107	}
1108}
1109
1110void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1111{
1112	const struct iphdr *iph = (const struct iphdr *)skb->data;
1113	struct flowi4 fl4;
1114	struct rtable *rt;
1115	struct dst_entry *odst = NULL;
1116	bool new = false;
1117	struct net *net = sock_net(sk);
1118
1119	bh_lock_sock(sk);
1120
1121	if (!ip_sk_accept_pmtu(sk))
1122		goto out;
1123
1124	odst = sk_dst_get(sk);
1125
1126	if (sock_owned_by_user(sk) || !odst) {
1127		__ipv4_sk_update_pmtu(skb, sk, mtu);
1128		goto out;
1129	}
1130
1131	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1132
1133	rt = dst_rtable(odst);
1134	if (odst->obsolete && !odst->ops->check(odst, 0)) {
1135		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1136		if (IS_ERR(rt))
1137			goto out;
1138
1139		new = true;
1140	}
1141
1142	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
1143
1144	if (!dst_check(&rt->dst, 0)) {
1145		if (new)
1146			dst_release(&rt->dst);
1147
1148		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1149		if (IS_ERR(rt))
1150			goto out;
1151
1152		new = true;
1153	}
1154
1155	if (new)
1156		sk_dst_set(sk, &rt->dst);
1157
1158out:
1159	bh_unlock_sock(sk);
1160	dst_release(odst);
1161}
1162EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1163
1164void ipv4_redirect(struct sk_buff *skb, struct net *net,
1165		   int oif, u8 protocol)
1166{
1167	const struct iphdr *iph = (const struct iphdr *)skb->data;
1168	struct flowi4 fl4;
1169	struct rtable *rt;
1170
1171	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
1172	rt = __ip_route_output_key(net, &fl4);
1173	if (!IS_ERR(rt)) {
1174		__ip_do_redirect(rt, skb, &fl4, false);
1175		ip_rt_put(rt);
1176	}
1177}
1178EXPORT_SYMBOL_GPL(ipv4_redirect);
1179
1180void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1181{
1182	const struct iphdr *iph = (const struct iphdr *)skb->data;
1183	struct flowi4 fl4;
1184	struct rtable *rt;
1185	struct net *net = sock_net(sk);
1186
1187	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1188	rt = __ip_route_output_key(net, &fl4);
1189	if (!IS_ERR(rt)) {
1190		__ip_do_redirect(rt, skb, &fl4, false);
1191		ip_rt_put(rt);
1192	}
1193}
1194EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1195
1196INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
1197							 u32 cookie)
1198{
1199	struct rtable *rt = dst_rtable(dst);
1200
1201	/* All IPV4 dsts are created with ->obsolete set to the value
1202	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1203	 * into this function always.
1204	 *
1205	 * When a PMTU/redirect information update invalidates a route,
1206	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1207	 * DST_OBSOLETE_DEAD.
1208	 */
1209	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1210		return NULL;
1211	return dst;
1212}
1213EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
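/* Contract sketch: dst_check() only calls into here when ->obsolete is
 * non-zero, and a NULL return tells the caller (e.g. sk_dst_check()) to
 * drop its cached dst and redo the route lookup. That is how KILLed
 * PMTU/redirect routes and rt_cache_flush() genid bumps propagate.
 */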
1214
1215static void ipv4_send_dest_unreach(struct sk_buff *skb)
1216{
1217	struct net_device *dev;
1218	struct ip_options opt;
1219	int res;
1220
1221	/* Recompile ip options since IPCB may not be valid anymore.
1222	 * Also check we have a reasonable ipv4 header.
1223	 */
1224	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1225	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1226		return;
1227
1228	memset(&opt, 0, sizeof(opt));
1229	if (ip_hdr(skb)->ihl > 5) {
1230		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1231			return;
1232		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1233
1234		rcu_read_lock();
1235		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
1236		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
1237		rcu_read_unlock();
1238
1239		if (res)
1240			return;
1241	}
1242	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1243}
1244
1245static void ipv4_link_failure(struct sk_buff *skb)
1246{
1247	struct rtable *rt;
1248
1249	ipv4_send_dest_unreach(skb);
1250
1251	rt = skb_rtable(skb);
1252	if (rt)
1253		dst_set_expires(&rt->dst, 0);
1254}
1255
1256static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1257{
1258	pr_debug("%s: %pI4 -> %pI4, %s\n",
1259		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1260		 skb->dev ? skb->dev->name : "?");
1261	kfree_skb(skb);
1262	WARN_ON(1);
1263	return 0;
1264}
1265
1266/*
 1267 * We do not cache the source address of the outgoing interface,
 1268 * because it is used only by the IP RR, TS and SRR options,
 1269 * so it stays out of the fast path.
 1270 *
 1271 * BTW remember: "addr" is allowed to be unaligned
 1272 * in IP options!
1273 */
1274
1275void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1276{
1277	__be32 src;
1278
1279	if (rt_is_output_route(rt))
1280		src = ip_hdr(skb)->saddr;
1281	else {
1282		struct fib_result res;
1283		struct iphdr *iph = ip_hdr(skb);
1284		struct flowi4 fl4 = {
1285			.daddr = iph->daddr,
1286			.saddr = iph->saddr,
1287			.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
1288			.flowi4_oif = rt->dst.dev->ifindex,
1289			.flowi4_iif = skb->dev->ifindex,
1290			.flowi4_mark = skb->mark,
1291		};
1292
1293		rcu_read_lock();
1294		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1295			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1296		else
1297			src = inet_select_addr(rt->dst.dev,
1298					       rt_nexthop(rt, iph->daddr),
1299					       RT_SCOPE_UNIVERSE);
1300		rcu_read_unlock();
1301	}
1302	memcpy(addr, &src, 4);
1303}
1304
1305#ifdef CONFIG_IP_ROUTE_CLASSID
1306static void set_class_tag(struct rtable *rt, u32 tag)
1307{
1308	if (!(rt->dst.tclassid & 0xFFFF))
1309		rt->dst.tclassid |= tag & 0xFFFF;
1310	if (!(rt->dst.tclassid & 0xFFFF0000))
1311		rt->dst.tclassid |= tag & 0xFFFF0000;
1312}
1313#endif
1314
1315static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1316{
1317	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1318	unsigned int advmss;
1319	struct net *net;
1320
1321	rcu_read_lock();
1322	net = dev_net_rcu(dst->dev);
1323	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1324				   net->ipv4.ip_rt_min_advmss);
1325	rcu_read_unlock();
1326
1327	return min(advmss, IPV4_MAX_PMTU - header_size);
1328}
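/* E.g. a 1500-byte path MTU yields an advertised MSS of 1460 (1500
 * minus 20 bytes each of IPv4 and TCP header), never below the
 * ip_rt_min_advmss sysctl (256 by default).
 */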
1329
1330INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
1331{
1332	return ip_dst_mtu_maybe_forward(dst, false);
1333}
1334EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
1335
1336static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1337{
1338	struct fnhe_hash_bucket *hash;
1339	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1340	u32 hval = fnhe_hashfun(daddr);
1341
1342	spin_lock_bh(&fnhe_lock);
1343
1344	hash = rcu_dereference_protected(nhc->nhc_exceptions,
1345					 lockdep_is_held(&fnhe_lock));
1346	hash += hval;
1347
1348	fnhe_p = &hash->chain;
1349	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1350	while (fnhe) {
1351		if (fnhe->fnhe_daddr == daddr) {
1352			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1353				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1354			/* set fnhe_daddr to 0 to ensure it won't bind with
1355			 * new dsts in rt_bind_exception().
1356			 */
1357			fnhe->fnhe_daddr = 0;
1358			fnhe_flush_routes(fnhe);
1359			kfree_rcu(fnhe, rcu);
1360			break;
1361		}
1362		fnhe_p = &fnhe->fnhe_next;
1363		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1364						 lockdep_is_held(&fnhe_lock));
1365	}
1366
1367	spin_unlock_bh(&fnhe_lock);
1368}
1369
1370static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1371					       __be32 daddr)
1372{
1373	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1374	struct fib_nh_exception *fnhe;
1375	u32 hval;
1376
1377	if (!hash)
1378		return NULL;
1379
1380	hval = fnhe_hashfun(daddr);
1381
1382	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1383	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1384		if (fnhe->fnhe_daddr == daddr) {
1385			if (fnhe->fnhe_expires &&
1386			    time_after(jiffies, fnhe->fnhe_expires)) {
1387				ip_del_fnhe(nhc, daddr);
1388				break;
1389			}
1390			return fnhe;
1391		}
1392	}
1393	return NULL;
1394}
1395
1396/* MTU selection:
1397 * 1. mtu on route is locked - use it
1398 * 2. mtu from nexthop exception
1399 * 3. mtu from egress device
1400 */
1401
1402u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1403{
1404	struct fib_nh_common *nhc = res->nhc;
1405	struct net_device *dev = nhc->nhc_dev;
1406	struct fib_info *fi = res->fi;
1407	u32 mtu = 0;
1408
1409	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
1410	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1411		mtu = fi->fib_mtu;
1412
1413	if (likely(!mtu)) {
1414		struct fib_nh_exception *fnhe;
1415
1416		fnhe = find_exception(nhc, daddr);
1417		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1418			mtu = fnhe->fnhe_pmtu;
1419	}
1420
1421	if (likely(!mtu))
1422		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1423
1424	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1425}
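/* Example of the selection order (hypothetical values): a route whose
 * mtu metric is locked at 1400 always reports 1400; otherwise a live
 * nexthop exception for daddr wins; otherwise a plain ethernet nexthop
 * falls back to dev->mtu (typically 1500), minus any lwtunnel
 * encapsulation headroom.
 */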
1426
1427static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1428			      __be32 daddr, const bool do_cache)
1429{
1430	bool ret = false;
1431
1432	spin_lock_bh(&fnhe_lock);
1433
1434	if (daddr == fnhe->fnhe_daddr) {
1435		struct rtable __rcu **porig;
1436		struct rtable *orig;
1437		int genid = fnhe_genid(dev_net(rt->dst.dev));
1438
1439		if (rt_is_input_route(rt))
1440			porig = &fnhe->fnhe_rth_input;
1441		else
1442			porig = &fnhe->fnhe_rth_output;
1443		orig = rcu_dereference(*porig);
1444
1445		if (fnhe->fnhe_genid != genid) {
1446			fnhe->fnhe_genid = genid;
1447			fnhe->fnhe_gw = 0;
1448			fnhe->fnhe_pmtu = 0;
1449			fnhe->fnhe_expires = 0;
1450			fnhe->fnhe_mtu_locked = false;
1451			fnhe_flush_routes(fnhe);
1452			orig = NULL;
1453		}
1454		fill_route_from_fnhe(rt, fnhe);
1455		if (!rt->rt_gw4) {
1456			rt->rt_gw4 = daddr;
1457			rt->rt_gw_family = AF_INET;
1458		}
1459
1460		if (do_cache) {
1461			dst_hold(&rt->dst);
1462			rcu_assign_pointer(*porig, rt);
1463			if (orig) {
1464				dst_dev_put(&orig->dst);
1465				dst_release(&orig->dst);
1466			}
1467			ret = true;
1468		}
1469
1470		fnhe->fnhe_stamp = jiffies;
1471	}
1472	spin_unlock_bh(&fnhe_lock);
1473
1474	return ret;
1475}
1476
1477static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1478{
1479	struct rtable *orig, *prev, **p;
1480	bool ret = true;
1481
1482	if (rt_is_input_route(rt)) {
1483		p = (struct rtable **)&nhc->nhc_rth_input;
1484	} else {
1485		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1486	}
1487	orig = *p;
1488
1489	/* hold dst before doing cmpxchg() to avoid race condition
1490	 * on this dst
1491	 */
1492	dst_hold(&rt->dst);
1493	prev = cmpxchg(p, orig, rt);
1494	if (prev == orig) {
1495		if (orig) {
1496			rt_add_uncached_list(orig);
1497			dst_release(&orig->dst);
1498		}
1499	} else {
1500		dst_release(&rt->dst);
1501		ret = false;
1502	}
1503
1504	return ret;
1505}
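/* Why the hold precedes the cmpxchg(), as a minimal race sketch: were
 * rt published first and the reference taken afterwards, another CPU
 * could replace the slot and dst_release() rt inside that window,
 * dropping a reference we had not yet taken.
 */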
1506
1507struct uncached_list {
1508	spinlock_t		lock;
1509	struct list_head	head;
1510};
1511
1512static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1513
1514void rt_add_uncached_list(struct rtable *rt)
1515{
1516	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1517
1518	rt->dst.rt_uncached_list = ul;
1519
1520	spin_lock_bh(&ul->lock);
1521	list_add_tail(&rt->dst.rt_uncached, &ul->head);
1522	spin_unlock_bh(&ul->lock);
1523}
1524
1525void rt_del_uncached_list(struct rtable *rt)
1526{
1527	if (!list_empty(&rt->dst.rt_uncached)) {
1528		struct uncached_list *ul = rt->dst.rt_uncached_list;
1529
1530		spin_lock_bh(&ul->lock);
1531		list_del_init(&rt->dst.rt_uncached);
1532		spin_unlock_bh(&ul->lock);
1533	}
1534}
1535
1536static void ipv4_dst_destroy(struct dst_entry *dst)
1537{
1538	ip_dst_metrics_put(dst);
1539	rt_del_uncached_list(dst_rtable(dst));
1540}
1541
1542void rt_flush_dev(struct net_device *dev)
1543{
1544	struct rtable *rt, *safe;
1545	int cpu;
1546
1547	for_each_possible_cpu(cpu) {
1548		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1549
1550		if (list_empty(&ul->head))
1551			continue;
1552
1553		spin_lock_bh(&ul->lock);
1554		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
1555			if (rt->dst.dev != dev)
1556				continue;
1557			rt->dst.dev = blackhole_netdev;
1558			netdev_ref_replace(dev, blackhole_netdev,
1559					   &rt->dst.dev_tracker, GFP_ATOMIC);
1560			list_del_init(&rt->dst.rt_uncached);
1561		}
1562		spin_unlock_bh(&ul->lock);
1563	}
1564}
1565
1566static bool rt_cache_valid(const struct rtable *rt)
1567{
1568	return	rt &&
1569		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1570		!rt_is_expired(rt);
1571}
1572
1573static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1574			   const struct fib_result *res,
1575			   struct fib_nh_exception *fnhe,
1576			   struct fib_info *fi, u16 type, u32 itag,
1577			   const bool do_cache)
1578{
1579	bool cached = false;
1580
1581	if (fi) {
1582		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1583
1584		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1585			rt->rt_uses_gateway = 1;
1586			rt->rt_gw_family = nhc->nhc_gw_family;
1587			/* only INET and INET6 are supported */
1588			if (likely(nhc->nhc_gw_family == AF_INET))
1589				rt->rt_gw4 = nhc->nhc_gw.ipv4;
1590			else
1591				rt->rt_gw6 = nhc->nhc_gw.ipv6;
1592		}
1593
1594		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1595
1596#ifdef CONFIG_IP_ROUTE_CLASSID
1597		if (nhc->nhc_family == AF_INET) {
1598			struct fib_nh *nh;
1599
1600			nh = container_of(nhc, struct fib_nh, nh_common);
1601			rt->dst.tclassid = nh->nh_tclassid;
1602		}
1603#endif
1604		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1605		if (unlikely(fnhe))
1606			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1607		else if (do_cache)
1608			cached = rt_cache_route(nhc, rt);
1609		if (unlikely(!cached)) {
1610			/* Routes we intend to cache in nexthop exception or
1611			 * FIB nexthop have the DST_NOCACHE bit clear.
1612			 * However, if we are unsuccessful at storing this
1613			 * route into the cache we really need to set it.
1614			 */
1615			if (!rt->rt_gw4) {
1616				rt->rt_gw_family = AF_INET;
1617				rt->rt_gw4 = daddr;
1618			}
1619			rt_add_uncached_list(rt);
1620		}
1621	} else
1622		rt_add_uncached_list(rt);
1623
1624#ifdef CONFIG_IP_ROUTE_CLASSID
1625#ifdef CONFIG_IP_MULTIPLE_TABLES
1626	set_class_tag(rt, res->tclassid);
1627#endif
1628	set_class_tag(rt, itag);
1629#endif
1630}
1631
1632struct rtable *rt_dst_alloc(struct net_device *dev,
1633			    unsigned int flags, u16 type,
1634			    bool noxfrm)
1635{
1636	struct rtable *rt;
1637
1638	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
1639		       (noxfrm ? DST_NOXFRM : 0));
1640
1641	if (rt) {
1642		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1643		rt->rt_flags = flags;
1644		rt->rt_type = type;
1645		rt->rt_is_input = 0;
1646		rt->rt_iif = 0;
1647		rt->rt_pmtu = 0;
1648		rt->rt_mtu_locked = 0;
1649		rt->rt_uses_gateway = 0;
1650		rt->rt_gw_family = 0;
1651		rt->rt_gw4 = 0;
1652
1653		rt->dst.output = ip_output;
1654		if (flags & RTCF_LOCAL)
1655			rt->dst.input = ip_local_deliver;
1656	}
1657
1658	return rt;
1659}
1660EXPORT_SYMBOL(rt_dst_alloc);
1661
1662struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1663{
1664	struct rtable *new_rt;
1665
1666	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
1667			   rt->dst.flags);
1668
1669	if (new_rt) {
1670		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1671		new_rt->rt_flags = rt->rt_flags;
1672		new_rt->rt_type = rt->rt_type;
1673		new_rt->rt_is_input = rt->rt_is_input;
1674		new_rt->rt_iif = rt->rt_iif;
1675		new_rt->rt_pmtu = rt->rt_pmtu;
1676		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1677		new_rt->rt_gw_family = rt->rt_gw_family;
1678		if (rt->rt_gw_family == AF_INET)
1679			new_rt->rt_gw4 = rt->rt_gw4;
1680		else if (rt->rt_gw_family == AF_INET6)
1681			new_rt->rt_gw6 = rt->rt_gw6;
1682
1683		new_rt->dst.input = rt->dst.input;
1684		new_rt->dst.output = rt->dst.output;
1685		new_rt->dst.error = rt->dst.error;
1686		new_rt->dst.lastuse = jiffies;
1687		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1688	}
1689	return new_rt;
1690}
1691EXPORT_SYMBOL(rt_dst_clone);
1692
1693/* called in rcu_read_lock() section */
1694enum skb_drop_reason
1695ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1696		      dscp_t dscp, struct net_device *dev,
1697		      struct in_device *in_dev, u32 *itag)
1698{
1699	enum skb_drop_reason reason;
1700
1701	/* Primary sanity checks. */
1702	if (!in_dev)
1703		return SKB_DROP_REASON_NOT_SPECIFIED;
1704
1705	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1706		return SKB_DROP_REASON_IP_INVALID_SOURCE;
1707
1708	if (skb->protocol != htons(ETH_P_IP))
1709		return SKB_DROP_REASON_INVALID_PROTO;
1710
1711	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1712		return SKB_DROP_REASON_IP_LOCALNET;
1713
1714	if (ipv4_is_zeronet(saddr)) {
1715		if (!ipv4_is_local_multicast(daddr) &&
1716		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
1717			return SKB_DROP_REASON_IP_INVALID_SOURCE;
1718	} else {
1719		reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
1720						    dev, in_dev, itag);
1721		if (reason)
1722			return reason;
1723	}
1724	return SKB_NOT_DROPPED_YET;
1725}
1726
1727/* called in rcu_read_lock() section */
1728static enum skb_drop_reason
1729ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1730		  dscp_t dscp, struct net_device *dev, int our)
1731{
1732	struct in_device *in_dev = __in_dev_get_rcu(dev);
1733	unsigned int flags = RTCF_MULTICAST;
1734	enum skb_drop_reason reason;
1735	struct rtable *rth;
1736	u32 itag = 0;
1737
1738	reason = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev,
1739				       &itag);
1740	if (reason)
1741		return reason;
1742
1743	if (our)
1744		flags |= RTCF_LOCAL;
1745
1746	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
1747		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1748
1749	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1750			   false);
1751	if (!rth)
1752		return SKB_DROP_REASON_NOMEM;
1753
1754#ifdef CONFIG_IP_ROUTE_CLASSID
1755	rth->dst.tclassid = itag;
1756#endif
1757	rth->dst.output = ip_rt_bug;
 1758	rth->rt_is_input = 1;
1759
1760#ifdef CONFIG_IP_MROUTE
1761	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1762		rth->dst.input = ip_mr_input;
1763#endif
1764	RT_CACHE_STAT_INC(in_slow_mc);
1765
1766	skb_dst_drop(skb);
1767	skb_dst_set(skb, &rth->dst);
1768	return SKB_NOT_DROPPED_YET;
1769}
1770
1771
1772static void ip_handle_martian_source(struct net_device *dev,
1773				     struct in_device *in_dev,
1774				     struct sk_buff *skb,
1775				     __be32 daddr,
1776				     __be32 saddr)
1777{
1778	RT_CACHE_STAT_INC(in_martian_src);
1779#ifdef CONFIG_IP_ROUTE_VERBOSE
1780	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1781		/*
 1782		 *	RFC 1812 recommendation: if the source is martian,
 1783		 *	the only hint is the MAC header.
1784		 */
1785		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1786			&daddr, &saddr, dev->name);
1787		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1788			print_hex_dump(KERN_WARNING, "ll header: ",
1789				       DUMP_PREFIX_OFFSET, 16, 1,
1790				       skb_mac_header(skb),
1791				       dev->hard_header_len, false);
1792		}
1793	}
1794#endif
1795}
1796
1797/* called in rcu_read_lock() section */
1798static enum skb_drop_reason
1799__mkroute_input(struct sk_buff *skb, const struct fib_result *res,
1800		struct in_device *in_dev, __be32 daddr,
1801		__be32 saddr, dscp_t dscp)
1802{
1803	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
1804	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1805	struct net_device *dev = nhc->nhc_dev;
1806	struct fib_nh_exception *fnhe;
1807	struct rtable *rth;
1808	int err;
1809	struct in_device *out_dev;
1810	bool do_cache;
1811	u32 itag = 0;
1812
1813	/* get a working reference to the output device */
1814	out_dev = __in_dev_get_rcu(dev);
1815	if (!out_dev) {
1816		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1817		return reason;
1818	}
1819
1820	err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res),
1821				  in_dev->dev, in_dev, &itag);
1822	if (err < 0) {
1823		reason = -err;
1824		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1825					 saddr);
1826
1827		goto cleanup;
1828	}
1829
1830	do_cache = res->fi && !itag;
1831	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1832	    skb->protocol == htons(ETH_P_IP)) {
1833		__be32 gw;
1834
1835		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1836		if (IN_DEV_SHARED_MEDIA(out_dev) ||
1837		    inet_addr_onlink(out_dev, saddr, gw))
1838			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1839	}
1840
1841	if (skb->protocol != htons(ETH_P_IP)) {
 1842	/* Not IP (i.e. ARP). Do not create a route if it is
 1843	 * invalid for proxy arp. DNAT routes are always valid.
 1844	 *
 1845	 * The proxy arp feature has been extended to allow ARP
 1846	 * replies back on the same interface, to support
1847		 * Private VLAN switch technologies. See arp.c.
1848		 */
1849		if (out_dev == in_dev &&
1850		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1851			reason = SKB_DROP_REASON_ARP_PVLAN_DISABLE;
1852			goto cleanup;
1853		}
1854	}
1855
1856	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
1857		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1858
1859	fnhe = find_exception(nhc, daddr);
1860	if (do_cache) {
1861		if (fnhe)
1862			rth = rcu_dereference(fnhe->fnhe_rth_input);
1863		else
1864			rth = rcu_dereference(nhc->nhc_rth_input);
1865		if (rt_cache_valid(rth)) {
1866			skb_dst_set_noref(skb, &rth->dst);
1867			goto out;
1868		}
1869	}
1870
1871	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1872			   IN_DEV_ORCONF(out_dev, NOXFRM));
1873	if (!rth) {
1874		reason = SKB_DROP_REASON_NOMEM;
1875		goto cleanup;
1876	}
1877
1878	rth->rt_is_input = 1;
1879	RT_CACHE_STAT_INC(in_slow_tot);
1880
1881	rth->dst.input = ip_forward;
1882
1883	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1884		       do_cache);
1885	lwtunnel_set_redirect(&rth->dst);
1886	skb_dst_set(skb, &rth->dst);
1887out:
1888	reason = SKB_NOT_DROPPED_YET;
1889cleanup:
1890	return reason;
1891}
1892
1893#ifdef CONFIG_IP_ROUTE_MULTIPATH
1894/* To make ICMP packets follow the right flow, the multipath hash is
1895 * calculated from the inner IP addresses.
1896 */
1897static void ip_multipath_l3_keys(const struct sk_buff *skb,
1898				 struct flow_keys *hash_keys)
1899{
1900	const struct iphdr *outer_iph = ip_hdr(skb);
1901	const struct iphdr *key_iph = outer_iph;
1902	const struct iphdr *inner_iph;
1903	const struct icmphdr *icmph;
1904	struct iphdr _inner_iph;
1905	struct icmphdr _icmph;
1906
1907	if (likely(outer_iph->protocol != IPPROTO_ICMP))
1908		goto out;
1909
1910	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1911		goto out;
1912
1913	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1914				   &_icmph);
1915	if (!icmph)
1916		goto out;
1917
1918	if (!icmp_is_err(icmph->type))
1919		goto out;
1920
1921	inner_iph = skb_header_pointer(skb,
1922				       outer_iph->ihl * 4 + sizeof(_icmph),
1923				       sizeof(_inner_iph), &_inner_iph);
1924	if (!inner_iph)
1925		goto out;
1926
1927	key_iph = inner_iph;
1928out:
1929	hash_keys->addrs.v4addrs.src = key_iph->saddr;
1930	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1931}
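/* A concrete illustration of the comment above: an ICMP "fragmentation
 * needed" error sent in reply to a TCP flow embeds the original IP
 * header, so hashing on that inner header keeps the error on the same
 * ECMP leg as the flow it refers to, and PMTU discovery keeps working
 * across multipath routes.
 */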
1932
1933static u32 fib_multipath_custom_hash_outer(const struct net *net,
1934					   const struct sk_buff *skb,
1935					   bool *p_has_inner)
1936{
1937	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1938	struct flow_keys keys, hash_keys;
1939
1940	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1941		return 0;
1942
1943	memset(&hash_keys, 0, sizeof(hash_keys));
1944	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
1945
1946	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1947	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1948		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1949	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1950		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1951	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1952		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1953	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1954		hash_keys.ports.src = keys.ports.src;
1955	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
1956		hash_keys.ports.dst = keys.ports.dst;
1957
1958	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
1959	return fib_multipath_hash_from_keys(net, &hash_keys);
1960}
1961
1962static u32 fib_multipath_custom_hash_inner(const struct net *net,
1963					   const struct sk_buff *skb,
1964					   bool has_inner)
1965{
1966	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1967	struct flow_keys keys, hash_keys;
1968
1969	/* We assume the packet carries an encapsulation, but if none was
1970	 * encountered during dissection of the outer flow, then there is no
1971	 * point in calling the flow dissector again.
1972	 */
1973	if (!has_inner)
1974		return 0;
1975
1976	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
1977		return 0;
1978
1979	memset(&hash_keys, 0, sizeof(hash_keys));
1980	skb_flow_dissect_flow_keys(skb, &keys, 0);
1981
1982	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
1983		return 0;
1984
1985	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1986		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1987		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1988			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1989		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1990			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1991	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1992		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1993		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1994			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1995		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1996			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1997		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
1998			hash_keys.tags.flow_label = keys.tags.flow_label;
1999	}
2000
2001	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2002		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2003	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2004		hash_keys.ports.src = keys.ports.src;
2005	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2006		hash_keys.ports.dst = keys.ports.dst;
2007
2008	return fib_multipath_hash_from_keys(net, &hash_keys);
2009}
2010
2011static u32 fib_multipath_custom_hash_skb(const struct net *net,
2012					 const struct sk_buff *skb)
2013{
2014	u32 mhash, mhash_inner;
2015	bool has_inner = true;
2016
2017	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
2018	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
2019
2020	return jhash_2words(mhash, mhash_inner, 0);
2021}
2022
2023static u32 fib_multipath_custom_hash_fl4(const struct net *net,
2024					 const struct flowi4 *fl4)
2025{
2026	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
2027	struct flow_keys hash_keys;
2028
2029	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2030		return 0;
2031
2032	memset(&hash_keys, 0, sizeof(hash_keys));
2033	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2034	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2035		hash_keys.addrs.v4addrs.src = fl4->saddr;
2036	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2037		hash_keys.addrs.v4addrs.dst = fl4->daddr;
2038	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2039		hash_keys.basic.ip_proto = fl4->flowi4_proto;
2040	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2041		hash_keys.ports.src = fl4->fl4_sport;
2042	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2043		hash_keys.ports.dst = fl4->fl4_dport;
2044
2045	return fib_multipath_hash_from_keys(net, &hash_keys);
2046}
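/* A usage sketch for the custom policy: with hash policy 3 selected,
 * the fields hashed by the helpers above are chosen by the
 * fib_multipath_hash_fields bitmask, e.g.
 *
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=3
 *	sysctl -w net.ipv4.fib_multipath_hash_fields=0x0037
 *
 * where 0x0037 (src IP | dst IP | IP proto | src port | dst port)
 * approximates the classic L4 five-tuple hash.
 */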
2047
2048	/* If skb is set, it will be used and fl4 can be NULL. */
2049int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2050		       const struct sk_buff *skb, struct flow_keys *flkeys)
2051{
2052	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2053	struct flow_keys hash_keys;
2054	u32 mhash = 0;
2055
2056	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
2057	case 0:
2058		memset(&hash_keys, 0, sizeof(hash_keys));
2059		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2060		if (skb) {
2061			ip_multipath_l3_keys(skb, &hash_keys);
2062		} else {
2063			hash_keys.addrs.v4addrs.src = fl4->saddr;
2064			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2065		}
2066		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2067		break;
2068	case 1:
2069		/* skb is currently provided only when forwarding */
2070		if (skb) {
2071			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2072			struct flow_keys keys;
2073
2074			/* short-circuit if we already have L4 hash present */
2075			if (skb->l4_hash)
2076				return skb_get_hash_raw(skb) >> 1;
2077
2078			memset(&hash_keys, 0, sizeof(hash_keys));
2079
2080			if (!flkeys) {
2081				skb_flow_dissect_flow_keys(skb, &keys, flag);
2082				flkeys = &keys;
2083			}
2084
2085			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2086			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2087			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2088			hash_keys.ports.src = flkeys->ports.src;
2089			hash_keys.ports.dst = flkeys->ports.dst;
2090			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2091		} else {
2092			memset(&hash_keys, 0, sizeof(hash_keys));
2093			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2094			hash_keys.addrs.v4addrs.src = fl4->saddr;
2095			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2096			hash_keys.ports.src = fl4->fl4_sport;
2097			hash_keys.ports.dst = fl4->fl4_dport;
2098			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2099		}
2100		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2101		break;
2102	case 2:
2103		memset(&hash_keys, 0, sizeof(hash_keys));
2104		/* skb is currently provided only when forwarding */
2105		if (skb) {
2106			struct flow_keys keys;
2107
2108			skb_flow_dissect_flow_keys(skb, &keys, 0);
2109			/* Inner can be v4 or v6 */
2110			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2111				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2112				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2113				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2114			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2115				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2116				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2117				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2118				hash_keys.tags.flow_label = keys.tags.flow_label;
2119				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2120			} else {
2121				/* Same as case 0 */
2122				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2123				ip_multipath_l3_keys(skb, &hash_keys);
2124			}
2125		} else {
2126			/* Same as case 0 */
2127			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2128			hash_keys.addrs.v4addrs.src = fl4->saddr;
2129			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2130		}
2131		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2132		break;
2133	case 3:
2134		if (skb)
2135			mhash = fib_multipath_custom_hash_skb(net, skb);
2136		else
2137			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2138		break;
2139	}
2140
2141	if (multipath_hash)
2142		mhash = jhash_2words(mhash, multipath_hash, 0);
2143
2144	return mhash >> 1;
2145}
2146#endif /* CONFIG_IP_ROUTE_MULTIPATH */
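/* Summary of the policies handled above, as selected by the
 * net.ipv4.fib_multipath_hash_policy sysctl: 0 hashes on L3 addresses
 * (using the inner header for ICMP errors), 1 on the L4 five-tuple,
 * 2 on the L3 header of the innermost packet, and 3 on the custom
 * field set implemented by the fib_multipath_custom_hash_*() helpers.
 */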
2147
2148static enum skb_drop_reason
2149ip_mkroute_input(struct sk_buff *skb, struct fib_result *res,
2150		 struct in_device *in_dev, __be32 daddr,
2151		 __be32 saddr, dscp_t dscp, struct flow_keys *hkeys)
2152	{
2153#ifdef CONFIG_IP_ROUTE_MULTIPATH
2154	if (res->fi && fib_info_num_path(res->fi) > 1) {
2155		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2156
2157		fib_select_multipath(res, h);
2158		IPCB(skb)->flags |= IPSKB_MULTIPATH;
2159	}
2160#endif
2161
2162	/* create a routing cache entry */
2163	return __mkroute_input(skb, res, in_dev, daddr, saddr, dscp);
2164}
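/* Put differently: for an ECMP route the hash above selects one
 * nexthop per flow, so every packet of a flow stays on the same leg;
 * only then does __mkroute_input() build or reuse the per-nexthop
 * cache entry.
 */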
2165
2166	/* Implements the same saddr-related checks as ip_route_input_slow(),
2167	 * assuming daddr is valid and the destination is not a local broadcast
2168	 * address. Uses the provided hint instead of performing a route lookup.
2169	 */
2170enum skb_drop_reason
2171ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2172		  dscp_t dscp, struct net_device *dev,
2173		  const struct sk_buff *hint)
2174{
2175	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
2176	struct in_device *in_dev = __in_dev_get_rcu(dev);
2177	struct rtable *rt = skb_rtable(hint);
2178	struct net *net = dev_net(dev);
2179	u32 tag = 0;
2180
2181	if (!in_dev)
2182		return reason;
2183
2184	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
2185		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
2186		goto martian_source;
2187	}
2188
2189	if (ipv4_is_zeronet(saddr)) {
2190		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
2191		goto martian_source;
2192	}
2193
2194	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
2195		reason = SKB_DROP_REASON_IP_LOCALNET;
2196		goto martian_source;
2197	}
2198
2199	if (rt->rt_type != RTN_LOCAL)
2200		goto skip_validate_source;
2201
2202	reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev,
2203					    in_dev, &tag);
2204	if (reason)
2205		goto martian_source;
2206
2207skip_validate_source:
2208	skb_dst_copy(skb, hint);
2209	return SKB_NOT_DROPPED_YET;
2210
2211martian_source:
2212	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2213	return reason;
2214}
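/* One line of context: the hint passed in here is the previous skb of
 * a receive batch, letting the listified input path reuse its dst when
 * consecutive packets resolve to the same route instead of repeating a
 * full fib_lookup().
 */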
2215
2216/* get device for dst_alloc with local routes */
2217static struct net_device *ip_rt_get_dev(struct net *net,
2218					const struct fib_result *res)
2219{
2220	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2221	struct net_device *dev = NULL;
2222
2223	if (nhc)
2224		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2225
2226	return dev ? : net->loopback_dev;
2227}
2228
2229	/*
2230	 *	NOTE. We drop all packets that have local source
2231	 *	addresses, because every properly looped-back packet
2232	 *	must already have the correct destination attached by the output routine.
2233	 *	Changes in the enforced policies must also be applied to
2234	 *	ip_route_use_hint().
2235	 *
2236	 *	This approach solves two big problems:
2237	 *	1. Non-simplex devices are handled properly.
2238	 *	2. IP spoofing attempts are filtered with a 100% guarantee.
2239	 *	Called with rcu_read_lock().
2240	 */
2241
2242static enum skb_drop_reason
2243ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2244		    dscp_t dscp, struct net_device *dev,
2245		    struct fib_result *res)
2246{
2247	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
2248	struct in_device *in_dev = __in_dev_get_rcu(dev);
2249	struct flow_keys *flkeys = NULL, _flkeys;
2250	struct net    *net = dev_net(dev);
2251	struct ip_tunnel_info *tun_info;
2252	int		err = -EINVAL;
2253	unsigned int	flags = 0;
2254	u32		itag = 0;
2255	struct rtable	*rth;
2256	struct flowi4	fl4;
2257	bool do_cache = true;
2258
2259	/* IP on this device is disabled. */
2260
2261	if (!in_dev)
2262		goto out;
2263
2264	/* Check for the weirdest martians, which cannot be detected
2265	 * by fib_lookup.
2266	 */
2267
2268	tun_info = skb_tunnel_info(skb);
2269	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2270		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2271	else
2272		fl4.flowi4_tun_key.tun_id = 0;
2273	skb_dst_drop(skb);
2274
2275	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
2276		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
2277		goto martian_source;
2278	}
2279
2280	res->fi = NULL;
2281	res->table = NULL;
2282	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2283		goto brd_input;
2284
2285	/* Accept zero addresses only for limited broadcast;
2286	 * I do not even know whether to fix this or not. Waiting for complaints :-)
2287	 */
2288	if (ipv4_is_zeronet(saddr)) {
2289		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
2290		goto martian_source;
2291	}
2292
2293	if (ipv4_is_zeronet(daddr)) {
2294		reason = SKB_DROP_REASON_IP_INVALID_DEST;
2295		goto martian_destination;
2296	}
2297
2298	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
2299	 * more than once when daddr and/or saddr are loopback addresses.
2300	 */
2301	if (ipv4_is_loopback(daddr)) {
2302		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
2303			reason = SKB_DROP_REASON_IP_LOCALNET;
2304			goto martian_destination;
2305		}
2306	} else if (ipv4_is_loopback(saddr)) {
2307		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
2308			reason = SKB_DROP_REASON_IP_LOCALNET;
2309			goto martian_source;
2310		}
2311	}
2312
2313	/*
2314	 *	Now we are ready to route the packet.
2315	 */
2316	fl4.flowi4_l3mdev = 0;
2317	fl4.flowi4_oif = 0;
2318	fl4.flowi4_iif = dev->ifindex;
2319	fl4.flowi4_mark = skb->mark;
2320	fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
2321	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2322	fl4.flowi4_flags = 0;
2323	fl4.daddr = daddr;
2324	fl4.saddr = saddr;
2325	fl4.flowi4_uid = sock_net_uid(net, NULL);
2326	fl4.flowi4_multipath_hash = 0;
2327
2328	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2329		flkeys = &_flkeys;
2330	} else {
2331		fl4.flowi4_proto = 0;
2332		fl4.fl4_sport = 0;
2333		fl4.fl4_dport = 0;
2334	}
2335
2336	err = fib_lookup(net, &fl4, res, 0);
2337	if (err != 0) {
2338		if (!IN_DEV_FORWARD(in_dev))
2339			err = -EHOSTUNREACH;
2340		goto no_route;
2341	}
2342
2343	if (res->type == RTN_BROADCAST) {
2344		if (IN_DEV_BFORWARD(in_dev))
2345			goto make_route;
2346		/* do not cache if bc_forwarding is enabled */
2347		if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
2348			do_cache = false;
2349		goto brd_input;
2350	}
2351
2352	err = -EINVAL;
2353	if (res->type == RTN_LOCAL) {
2354		reason = fib_validate_source_reason(skb, saddr, daddr, dscp,
2355						    0, dev, in_dev, &itag);
2356		if (reason)
2357			goto martian_source;
2358		goto local_input;
2359	}
2360
2361	if (!IN_DEV_FORWARD(in_dev)) {
2362		err = -EHOSTUNREACH;
2363		goto no_route;
2364	}
2365	if (res->type != RTN_UNICAST) {
2366		reason = SKB_DROP_REASON_IP_INVALID_DEST;
2367		goto martian_destination;
2368	}
2369
2370make_route:
2371	reason = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp,
2372				  flkeys);
2373
2374out:
2375	return reason;
2376
2377brd_input:
2378	if (skb->protocol != htons(ETH_P_IP)) {
2379		reason = SKB_DROP_REASON_INVALID_PROTO;
2380		goto out;
2381	}
2382
2383	if (!ipv4_is_zeronet(saddr)) {
2384		reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
2385						    dev, in_dev, &itag);
2386		if (reason)
2387			goto martian_source;
2388	}
2389	flags |= RTCF_BROADCAST;
2390	res->type = RTN_BROADCAST;
2391	RT_CACHE_STAT_INC(in_brd);
2392
2393local_input:
2394	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
2395		IPCB(skb)->flags |= IPSKB_NOPOLICY;
2396
2397	do_cache &= res->fi && !itag;
2398	if (do_cache) {
2399		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2400
2401		rth = rcu_dereference(nhc->nhc_rth_input);
2402		if (rt_cache_valid(rth)) {
2403			skb_dst_set_noref(skb, &rth->dst);
2404			reason = SKB_NOT_DROPPED_YET;
2405			goto out;
2406		}
2407	}
2408
2409	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2410			   flags | RTCF_LOCAL, res->type, false);
2411	if (!rth)
2412		goto e_nobufs;
2413
2414	rth->dst.output = ip_rt_bug;
2415#ifdef CONFIG_IP_ROUTE_CLASSID
2416	rth->dst.tclassid = itag;
2417#endif
2418	rth->rt_is_input = 1;
2419
2420	RT_CACHE_STAT_INC(in_slow_tot);
2421	if (res->type == RTN_UNREACHABLE) {
2422		rth->dst.input = ip_error;
2423		rth->dst.error = -err;
2424		rth->rt_flags	&= ~RTCF_LOCAL;
2425	}
2426
2427	if (do_cache) {
2428		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2429
2430		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2431		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2432			WARN_ON(rth->dst.input == lwtunnel_input);
2433			rth->dst.lwtstate->orig_input = rth->dst.input;
2434			rth->dst.input = lwtunnel_input;
2435		}
2436
2437		if (unlikely(!rt_cache_route(nhc, rth)))
2438			rt_add_uncached_list(rth);
2439	}
2440	skb_dst_set(skb, &rth->dst);
2441	reason = SKB_NOT_DROPPED_YET;
2442	goto out;
2443
2444no_route:
2445	RT_CACHE_STAT_INC(in_no_route);
2446	res->type = RTN_UNREACHABLE;
2447	res->fi = NULL;
2448	res->table = NULL;
2449	goto local_input;
2450
2451	/*
2452	 *	Do not cache martian addresses: they should be logged (RFC1812)
2453	 */
2454martian_destination:
2455	RT_CACHE_STAT_INC(in_martian_dst);
2456#ifdef CONFIG_IP_ROUTE_VERBOSE
2457	if (IN_DEV_LOG_MARTIANS(in_dev))
2458		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2459				     &daddr, &saddr, dev->name);
2460	#endif
2461		goto out;
2462
2463e_nobufs:
2464	reason = SKB_DROP_REASON_NOMEM;
2465	goto out;
2466
2467martian_source:
2468	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2469	goto out;
2470}
2471
2472	/* called with rcu_read_lock held */
2473static enum skb_drop_reason
2474ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2475		   dscp_t dscp, struct net_device *dev,
2476		   struct fib_result *res)
2477{
2478	/* Multicast recognition logic was moved from the route cache to here.
2479	 * The problem was that too many Ethernet cards have broken/missing
2480	 * hardware multicast filters :-( As a result, a host on a multicast
2481	 * network acquires a lot of useless route cache entries, e.g. for
2482	 * SDR messages from all over the world. Now we try to get rid of them.
2483	 * Really, provided the software IP multicast filter is organized
2484	 * reasonably (at least, hashed), it does not result in a slowdown
2485	 * compared with route cache reject entries.
2486	 * Note that multicast routers are not affected, because a
2487	 * route cache entry is created eventually.
2488	 */
2489	if (ipv4_is_multicast(daddr)) {
2490		enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
2491		struct in_device *in_dev = __in_dev_get_rcu(dev);
2492		int our = 0;
2493
2494		if (!in_dev)
2495			return reason;
2496
2497		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2498				      ip_hdr(skb)->protocol);
2499
2500		/* check l3 master if no match yet */
2501		if (!our && netif_is_l3_slave(dev)) {
2502			struct in_device *l3_in_dev;
2503
2504			l3_in_dev = __in_dev_get_rcu(skb->dev);
2505			if (l3_in_dev)
2506				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2507						      ip_hdr(skb)->protocol);
2508		}
2509
2510		if (our
2511#ifdef CONFIG_IP_MROUTE
2512			||
2513		    (!ipv4_is_local_multicast(daddr) &&
2514		     IN_DEV_MFORWARD(in_dev))
2515#endif
2516		   ) {
2517			reason = ip_route_input_mc(skb, daddr, saddr, dscp,
2518						   dev, our);
2519		}
2520		return reason;
2521	}
2522
2523	return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res);
2524}
2525
2526enum skb_drop_reason ip_route_input_noref(struct sk_buff *skb, __be32 daddr,
2527					  __be32 saddr, dscp_t dscp,
2528					  struct net_device *dev)
2529{
2530	enum skb_drop_reason reason;
2531	struct fib_result res;
2532
2533	rcu_read_lock();
2534	reason = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res);
2535	rcu_read_unlock();
2536
2537	return reason;
2538}
2539EXPORT_SYMBOL(ip_route_input_noref);
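/* A minimal caller sketch (hypothetical helper, error handling
 * abbreviated): a receive path resolves the input route and then hands
 * the skb to dst_input(), which dispatches via the dst set above to
 * ip_forward() or ip_local_deliver().
 */
static inline int example_ip_rcv_route(struct sk_buff *skb,
				       struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	enum skb_drop_reason reason;

	reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
				      inet_dsfield_to_dscp(iph->tos), dev);
	if (reason != SKB_NOT_DROPPED_YET) {
		kfree_skb_reason(skb, reason);
		return NET_RX_DROP;
	}
	return dst_input(skb);
}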
2540
2541/* called with rcu_read_lock() */
2542static struct rtable *__mkroute_output(const struct fib_result *res,
2543				       const struct flowi4 *fl4, int orig_oif,
2544				       struct net_device *dev_out,
2545				       unsigned int flags)
2546{
2547	struct fib_info *fi = res->fi;
2548	struct fib_nh_exception *fnhe;
2549	struct in_device *in_dev;
2550	u16 type = res->type;
2551	struct rtable *rth;
2552	bool do_cache;
2553
2554	in_dev = __in_dev_get_rcu(dev_out);
2555	if (!in_dev)
2556		return ERR_PTR(-EINVAL);
2557
2558	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2559		if (ipv4_is_loopback(fl4->saddr) &&
2560		    !(dev_out->flags & IFF_LOOPBACK) &&
2561		    !netif_is_l3_master(dev_out))
2562			return ERR_PTR(-EINVAL);
2563
2564	if (ipv4_is_lbcast(fl4->daddr))
2565		type = RTN_BROADCAST;
2566	else if (ipv4_is_multicast(fl4->daddr))
2567		type = RTN_MULTICAST;
2568	else if (ipv4_is_zeronet(fl4->daddr))
2569		return ERR_PTR(-EINVAL);
2570
2571	if (dev_out->flags & IFF_LOOPBACK)
2572		flags |= RTCF_LOCAL;
2573
2574	do_cache = true;
2575	if (type == RTN_BROADCAST) {
2576		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2577		fi = NULL;
2578	} else if (type == RTN_MULTICAST) {
2579		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2580		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2581				     fl4->flowi4_proto))
2582			flags &= ~RTCF_LOCAL;
2583		else
2584			do_cache = false;
2585		/* If the multicast route does not exist, use the
2586		 * default one, but do not use a gateway in this case.
2587		 * Yes, it is a hack.
2588		 */
2589		if (fi && res->prefixlen < 4)
2590			fi = NULL;
2591	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2592		   (orig_oif != dev_out->ifindex)) {
2593		/* For local routes that require a particular output interface
2594		 * we do not want to cache the result.  Caching the result
2595		 * causes incorrect behaviour when there are multiple source
2596		 * addresses on the interface: if the intended recipient is
2597		 * waiting on that interface for the packet, it won't be
2598		 * received, because it will be delivered on the loopback
2599		 * interface and the IP_PKTINFO ipi_ifindex will be set to
2600		 * the loopback interface as well.
2601		 */
2602		do_cache = false;
2603	}
2604
2605	fnhe = NULL;
2606	do_cache &= fi != NULL;
2607	if (fi) {
2608		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2609		struct rtable __rcu **prth;
2610
2611		fnhe = find_exception(nhc, fl4->daddr);
2612		if (!do_cache)
2613			goto add;
2614		if (fnhe) {
2615			prth = &fnhe->fnhe_rth_output;
2616		} else {
2617			if (unlikely(fl4->flowi4_flags &
2618				     FLOWI_FLAG_KNOWN_NH &&
2619				     !(nhc->nhc_gw_family &&
2620				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2621				do_cache = false;
2622				goto add;
2623			}
2624			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2625		}
2626		rth = rcu_dereference(*prth);
2627		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2628			return rth;
2629	}
2630
2631add:
2632	rth = rt_dst_alloc(dev_out, flags, type,
2633			   IN_DEV_ORCONF(in_dev, NOXFRM));
2634	if (!rth)
2635		return ERR_PTR(-ENOBUFS);
2636
2637	rth->rt_iif = orig_oif;
2638
2639	RT_CACHE_STAT_INC(out_slow_tot);
2640
2641	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2642		if (flags & RTCF_LOCAL &&
2643		    !(dev_out->flags & IFF_LOOPBACK)) {
2644			rth->dst.output = ip_mc_output;
2645			RT_CACHE_STAT_INC(out_slow_mc);
2646		}
2647#ifdef CONFIG_IP_MROUTE
2648		if (type == RTN_MULTICAST) {
2649			if (IN_DEV_MFORWARD(in_dev) &&
2650			    !ipv4_is_local_multicast(fl4->daddr)) {
2651				rth->dst.input = ip_mr_input;
2652				rth->dst.output = ip_mc_output;
2653			}
2654		}
2655#endif
2656	}
2657
2658	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2659	lwtunnel_set_redirect(&rth->dst);
2660
2661	return rth;
2662}
2663
2664/*
2665 * Major route resolver routine.
2666 */
2667
2668struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2669					const struct sk_buff *skb)
2670	{
2671	struct fib_result res = {
2672		.type		= RTN_UNSPEC,
2673		.fi		= NULL,
2674		.table		= NULL,
2675		.tclassid	= 0,
2676	};
2677	struct rtable *rth;
2678
2679	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2680	fl4->flowi4_tos &= INET_DSCP_MASK;
2681
2682	rcu_read_lock();
2683	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2684	rcu_read_unlock();
2685
2686	return rth;
2687}
2688EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2689
2690struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2691					    struct fib_result *res,
2692					    const struct sk_buff *skb)
2693{
2694	struct net_device *dev_out = NULL;
2695	int orig_oif = fl4->flowi4_oif;
2696	unsigned int flags = 0;
2697	struct rtable *rth;
2698	int err;
2699
2700	if (fl4->saddr) {
2701		if (ipv4_is_multicast(fl4->saddr) ||
2702		    ipv4_is_lbcast(fl4->saddr) ||
2703		    ipv4_is_zeronet(fl4->saddr)) {
2704			rth = ERR_PTR(-EINVAL);
2705			goto out;
2706		}
2707
2708		rth = ERR_PTR(-ENETUNREACH);
2709
2710		/* I removed the check for oif == dev_out->oif here.
2711		 * It was wrong for two reasons:
2712		 * 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2713		 *    is assigned to multiple interfaces.
2714		 * 2. Moreover, we are allowed to send packets with the saddr
2715		 *    of another iface. --ANK
2716		 */
2717
2718		if (fl4->flowi4_oif == 0 &&
2719		    (ipv4_is_multicast(fl4->daddr) ||
2720		     ipv4_is_lbcast(fl4->daddr))) {
2721			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2722			dev_out = __ip_dev_find(net, fl4->saddr, false);
2723			if (!dev_out)
2724				goto out;
2725
2726			/* Special hack: the user can direct multicasts
2727			 * and limited broadcast via the necessary interface
2728			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2729			 * This hack is not just for fun, it allows
2730			 * vic, vat and friends to work.
2731			 * They bind a socket to loopback, set ttl to zero
2732			 * and expect that it will work.
2733			 * From the viewpoint of the routing cache they are broken,
2734			 * because we are not allowed to build a multicast path
2735			 * with a loopback source addr (look, the routing cache
2736			 * cannot know that ttl is zero, so the packet
2737			 * will not leave this host and the route is valid).
2738			 * Luckily, this hack is a good workaround.
2739			 */
2740
2741			fl4->flowi4_oif = dev_out->ifindex;
2742			goto make_route;
2743		}
2744
2745		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2746			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2747			if (!__ip_dev_find(net, fl4->saddr, false))
2748				goto out;
2749		}
2750	}
2751
2752
2753	if (fl4->flowi4_oif) {
2754		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2755		rth = ERR_PTR(-ENODEV);
2756		if (!dev_out)
2757			goto out;
2758
2759		/* RACE: Check return value of inet_select_addr instead. */
2760		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2761			rth = ERR_PTR(-ENETUNREACH);
2762			goto out;
2763		}
2764		if (ipv4_is_local_multicast(fl4->daddr) ||
2765		    ipv4_is_lbcast(fl4->daddr) ||
2766		    fl4->flowi4_proto == IPPROTO_IGMP) {
2767			if (!fl4->saddr)
2768				fl4->saddr = inet_select_addr(dev_out, 0,
2769							      RT_SCOPE_LINK);
2770			goto make_route;
2771		}
2772		if (!fl4->saddr) {
2773			if (ipv4_is_multicast(fl4->daddr))
2774				fl4->saddr = inet_select_addr(dev_out, 0,
2775							      fl4->flowi4_scope);
2776			else if (!fl4->daddr)
2777				fl4->saddr = inet_select_addr(dev_out, 0,
2778							      RT_SCOPE_HOST);
2779		}
2780	}
2781
2782	if (!fl4->daddr) {
2783		fl4->daddr = fl4->saddr;
2784		if (!fl4->daddr)
2785			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2786		dev_out = net->loopback_dev;
2787		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2788		res->type = RTN_LOCAL;
2789		flags |= RTCF_LOCAL;
2790		goto make_route;
2791	}
2792
2793	err = fib_lookup(net, fl4, res, 0);
2794	if (err) {
2795		res->fi = NULL;
2796		res->table = NULL;
2797		if (fl4->flowi4_oif &&
2798		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2799			/* Apparently, the routing tables are wrong. Assume
2800			 * that the destination is on-link.
2801			 *
2802			 * WHY? DW.
2803			 * Because we are allowed to send to an iface
2804			 * even if it has NO routes and NO assigned
2805			 * addresses. When oif is specified, the routing
2806			 * tables are looked up with only one purpose:
2807			 * to catch whether the destination is gatewayed rather
2808			 * than direct. Moreover, if MSG_DONTROUTE is set,
2809			 * we send the packet, ignoring both routing tables
2810			 * and ifaddr state. --ANK
2811			 *
2812			 *
2813			 * We could do it even if oif is unknown,
2814			 * as IPv6 likely does, but we do not.
2815			 */
2816
2817			if (fl4->saddr == 0)
2818				fl4->saddr = inet_select_addr(dev_out, 0,
2819							      RT_SCOPE_LINK);
2820			res->type = RTN_UNICAST;
2821			goto make_route;
2822		}
2823		rth = ERR_PTR(err);
2824		goto out;
2825	}
2826
2827	if (res->type == RTN_LOCAL) {
2828		if (!fl4->saddr) {
2829			if (res->fi->fib_prefsrc)
2830				fl4->saddr = res->fi->fib_prefsrc;
2831			else
2832				fl4->saddr = fl4->daddr;
2833		}
2834
2835		/* L3 master device is the loopback for that domain */
2836		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2837			net->loopback_dev;
2838
2839		/* make sure orig_oif points to fib result device even
2840		 * though packet rx/tx happens over loopback or l3mdev
2841		 */
2842		orig_oif = FIB_RES_OIF(*res);
2843
2844		fl4->flowi4_oif = dev_out->ifindex;
2845		flags |= RTCF_LOCAL;
2846		goto make_route;
2847	}
2848
2849	fib_select_path(net, res, fl4, skb);
2850
2851	dev_out = FIB_RES_DEV(*res);
2852
2853make_route:
2854	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2855
2856out:
2857	return rth;
2858}
2859
2860static struct dst_ops ipv4_dst_blackhole_ops = {
2861	.family			= AF_INET,
2862	.default_advmss		= ipv4_default_advmss,
2863	.neigh_lookup		= ipv4_neigh_lookup,
2864	.check			= dst_blackhole_check,
2865	.cow_metrics		= dst_blackhole_cow_metrics,
2866	.update_pmtu		= dst_blackhole_update_pmtu,
2867	.redirect		= dst_blackhole_redirect,
2868	.mtu			= dst_blackhole_mtu,
2869};
2870
2871struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2872{
2873	struct rtable *ort = dst_rtable(dst_orig);
2874	struct rtable *rt;
2875
2876	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
2877	if (rt) {
2878		struct dst_entry *new = &rt->dst;
2879
2880		new->__use = 1;
2881		new->input = dst_discard;
2882		new->output = dst_discard_out;
2883
2884		new->dev = net->loopback_dev;
2885		netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
2886
2887		rt->rt_is_input = ort->rt_is_input;
2888		rt->rt_iif = ort->rt_iif;
2889		rt->rt_pmtu = ort->rt_pmtu;
2890		rt->rt_mtu_locked = ort->rt_mtu_locked;
2891
2892		rt->rt_genid = rt_genid_ipv4(net);
2893		rt->rt_flags = ort->rt_flags;
2894		rt->rt_type = ort->rt_type;
2895		rt->rt_uses_gateway = ort->rt_uses_gateway;
2896		rt->rt_gw_family = ort->rt_gw_family;
2897		if (rt->rt_gw_family == AF_INET)
2898			rt->rt_gw4 = ort->rt_gw4;
2899		else if (rt->rt_gw_family == AF_INET6)
2900			rt->rt_gw6 = ort->rt_gw6;
2901	}
2902
2903	dst_release(dst_orig);
2904
2905	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2906}
2907
2908struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2909				    const struct sock *sk)
2910{
2911	struct rtable *rt = __ip_route_output_key(net, flp4);
2912
2913	if (IS_ERR(rt))
2914		return rt;
2915
2916	if (flp4->flowi4_proto) {
2917		flp4->flowi4_oif = rt->dst.dev->ifindex;
2918		rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
2919						  flowi4_to_flowi(flp4),
2920						  sk, 0));
2921	}
2922
2923	return rt;
2924}
2925EXPORT_SYMBOL_GPL(ip_route_output_flow);
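/* A minimal caller sketch (hypothetical helper): the common way to
 * resolve an output route is through a flowi4 key and the
 * ip_route_output_key() wrapper, dropping the dst reference when done.
 */
static inline void example_output_lookup(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = { .daddr = daddr };
	struct rtable *rt;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;	/* e.g. -ENETUNREACH or -EINVAL from above */
	pr_debug("route via %s\n", rt->dst.dev->name);
	ip_rt_put(rt);	/* release the reference taken by the lookup */
}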
2926
2927	/* called with rcu_read_lock held */
2928static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2929			struct rtable *rt, u32 table_id, dscp_t dscp,
2930			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2931			u32 seq, unsigned int flags)
2932{
2933	struct rtmsg *r;
2934	struct nlmsghdr *nlh;
2935	unsigned long expires = 0;
2936	u32 error;
2937	u32 metrics[RTAX_MAX];
2938
2939	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2940	if (!nlh)
2941		return -EMSGSIZE;
2942
2943	r = nlmsg_data(nlh);
2944	r->rtm_family	 = AF_INET;
2945	r->rtm_dst_len	= 32;
2946	r->rtm_src_len	= 0;
2947	r->rtm_tos	= inet_dscp_to_dsfield(dscp);
2948	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2949	if (nla_put_u32(skb, RTA_TABLE, table_id))
2950		goto nla_put_failure;
2951	r->rtm_type	= rt->rt_type;
2952	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2953	r->rtm_protocol = RTPROT_UNSPEC;
2954	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2955	if (rt->rt_flags & RTCF_NOTIFY)
2956		r->rtm_flags |= RTM_F_NOTIFY;
2957	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2958		r->rtm_flags |= RTCF_DOREDIRECT;
2959
2960	if (nla_put_in_addr(skb, RTA_DST, dst))
2961		goto nla_put_failure;
2962	if (src) {
2963		r->rtm_src_len = 32;
2964		if (nla_put_in_addr(skb, RTA_SRC, src))
2965			goto nla_put_failure;
2966	}
2967	if (rt->dst.dev &&
2968	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2969		goto nla_put_failure;
2970	if (rt->dst.lwtstate &&
2971	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2972		goto nla_put_failure;
2973#ifdef CONFIG_IP_ROUTE_CLASSID
2974	if (rt->dst.tclassid &&
2975	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2976		goto nla_put_failure;
2977#endif
2978	if (fl4 && !rt_is_input_route(rt) &&
2979	    fl4->saddr != src) {
2980		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2981			goto nla_put_failure;
2982	}
2983	if (rt->rt_uses_gateway) {
2984		if (rt->rt_gw_family == AF_INET &&
2985		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2986			goto nla_put_failure;
2987		} else if (rt->rt_gw_family == AF_INET6) {
2988			int alen = sizeof(struct in6_addr);
2989			struct nlattr *nla;
2990			struct rtvia *via;
2991
2992			nla = nla_reserve(skb, RTA_VIA, alen + 2);
2993			if (!nla)
2994				goto nla_put_failure;
2995
2996			via = nla_data(nla);
2997			via->rtvia_family = AF_INET6;
2998			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2999		}
3000	}
3001
3002	expires = rt->dst.expires;
3003	if (expires) {
3004		unsigned long now = jiffies;
3005
3006		if (time_before(now, expires))
3007			expires -= now;
3008		else
3009			expires = 0;
3010	}
3011
3012	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3013	if (rt->rt_pmtu && expires)
3014		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
3015	if (rt->rt_mtu_locked && expires)
3016		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
3017	if (rtnetlink_put_metrics(skb, metrics) < 0)
3018		goto nla_put_failure;
3019
3020	if (fl4) {
3021		if (fl4->flowi4_mark &&
3022		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
3023			goto nla_put_failure;
3024
3025		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
3026		    nla_put_u32(skb, RTA_UID,
3027				from_kuid_munged(current_user_ns(),
3028						 fl4->flowi4_uid)))
3029			goto nla_put_failure;
3030
3031		if (rt_is_input_route(rt)) {
3032#ifdef CONFIG_IP_MROUTE
3033			if (ipv4_is_multicast(dst) &&
3034			    !ipv4_is_local_multicast(dst) &&
3035			    IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
3036				int err = ipmr_get_route(net, skb,
3037							 fl4->saddr, fl4->daddr,
3038							 r, portid);
3039
3040				if (err <= 0) {
3041					if (err == 0)
3042						return 0;
3043					goto nla_put_failure;
3044				}
3045			} else
3046#endif
3047				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3048					goto nla_put_failure;
3049		}
3050	}
3051
3052	error = rt->dst.error;
3053
3054	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3055		goto nla_put_failure;
3056
3057	nlmsg_end(skb, nlh);
3058	return 0;
3059
3060nla_put_failure:
3061	nlmsg_cancel(skb, nlh);
3062	return -EMSGSIZE;
3063}
3064
3065static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3066			    struct netlink_callback *cb, u32 table_id,
3067			    struct fnhe_hash_bucket *bucket, int genid,
3068			    int *fa_index, int fa_start, unsigned int flags)
3069{
3070	int i;
3071
3072	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3073		struct fib_nh_exception *fnhe;
3074
3075		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3076		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3077			struct rtable *rt;
3078			int err;
3079
3080			if (*fa_index < fa_start)
3081				goto next;
3082
3083			if (fnhe->fnhe_genid != genid)
3084				goto next;
3085
3086			if (fnhe->fnhe_expires &&
3087			    time_after(jiffies, fnhe->fnhe_expires))
3088				goto next;
3089
3090			rt = rcu_dereference(fnhe->fnhe_rth_input);
3091			if (!rt)
3092				rt = rcu_dereference(fnhe->fnhe_rth_output);
3093			if (!rt)
3094				goto next;
3095
3096			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3097					   table_id, 0, NULL, skb,
3098					   NETLINK_CB(cb->skb).portid,
3099					   cb->nlh->nlmsg_seq, flags);
3100			if (err)
3101				return err;
3102next:
3103			(*fa_index)++;
3104		}
3105	}
3106
3107	return 0;
3108}
3109
3110int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3111		       u32 table_id, struct fib_info *fi,
3112		       int *fa_index, int fa_start, unsigned int flags)
3113{
3114	struct net *net = sock_net(cb->skb->sk);
3115	int nhsel, genid = fnhe_genid(net);
3116
3117	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3118		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3119		struct fnhe_hash_bucket *bucket;
3120		int err;
3121
3122		if (nhc->nhc_flags & RTNH_F_DEAD)
3123			continue;
3124
3125		rcu_read_lock();
3126		bucket = rcu_dereference(nhc->nhc_exceptions);
3127		err = 0;
3128		if (bucket)
3129			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3130					       genid, fa_index, fa_start,
3131					       flags);
3132		rcu_read_unlock();
3133		if (err)
3134			return err;
3135	}
3136
3137	return 0;
3138}
3139
3140static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3141						   u8 ip_proto, __be16 sport,
3142						   __be16 dport)
3143{
3144	struct sk_buff *skb;
3145	struct iphdr *iph;
3146
3147	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3148	if (!skb)
3149		return NULL;
3150
3151	/* Reserve room for dummy headers; this skb can pass
3152	 * through a good chunk of the routing engine.
3153	 */
3154	skb_reset_mac_header(skb);
3155	skb_reset_network_header(skb);
3156	skb->protocol = htons(ETH_P_IP);
3157	iph = skb_put(skb, sizeof(struct iphdr));
3158	iph->protocol = ip_proto;
3159	iph->saddr = src;
3160	iph->daddr = dst;
3161	iph->version = 0x4;
3162	iph->frag_off = 0;
3163	iph->ihl = 0x5;
3164	skb_set_transport_header(skb, skb->len);
3165
3166	switch (iph->protocol) {
3167	case IPPROTO_UDP: {
3168		struct udphdr *udph;
3169
3170		udph = skb_put_zero(skb, sizeof(struct udphdr));
3171		udph->source = sport;
3172		udph->dest = dport;
3173		udph->len = htons(sizeof(struct udphdr));
3174		udph->check = 0;
3175		break;
3176	}
3177	case IPPROTO_TCP: {
3178		struct tcphdr *tcph;
3179
3180		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3181		tcph->source	= sport;
3182		tcph->dest	= dport;
3183		tcph->doff	= sizeof(struct tcphdr) / 4;
3184		tcph->rst = 1;
3185		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3186					    src, dst, 0);
3187		break;
3188	}
3189	case IPPROTO_ICMP: {
3190		struct icmphdr *icmph;
3191
3192		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3193		icmph->type = ICMP_ECHO;
3194		icmph->code = 0;
3195	}
3196	}
3197
3198	return skb;
3199}
3200
3201static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3202				       const struct nlmsghdr *nlh,
3203				       struct nlattr **tb,
3204				       struct netlink_ext_ack *extack)
3205{
3206	struct rtmsg *rtm;
3207	int i, err;
3208
3209	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3210		NL_SET_ERR_MSG(extack,
3211			       "ipv4: Invalid header for route get request");
3212		return -EINVAL;
3213	}
3214
3215	if (!netlink_strict_get_check(skb))
3216		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3217					      rtm_ipv4_policy, extack);
3218
3219	rtm = nlmsg_data(nlh);
3220	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3221	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3222	    rtm->rtm_table || rtm->rtm_protocol ||
3223	    rtm->rtm_scope || rtm->rtm_type) {
3224		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3225		return -EINVAL;
3226	}
3227
3228	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3229			       RTM_F_LOOKUP_TABLE |
3230			       RTM_F_FIB_MATCH)) {
3231		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3232		return -EINVAL;
3233	}
3234
3235	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3236					    rtm_ipv4_policy, extack);
3237	if (err)
3238		return err;
3239
3240	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3241	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3242		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3243		return -EINVAL;
3244	}
3245
3246	for (i = 0; i <= RTA_MAX; i++) {
3247		if (!tb[i])
3248			continue;
3249
3250		switch (i) {
3251		case RTA_IIF:
3252		case RTA_OIF:
3253		case RTA_SRC:
3254		case RTA_DST:
3255		case RTA_IP_PROTO:
3256		case RTA_SPORT:
3257		case RTA_DPORT:
3258		case RTA_MARK:
3259		case RTA_UID:
3260			break;
3261		default:
3262			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3263			return -EINVAL;
3264		}
3265	}
3266
3267	return 0;
3268}
3269
3270static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3271			     struct netlink_ext_ack *extack)
3272{
3273	struct net *net = sock_net(in_skb->sk);
3274	struct nlattr *tb[RTA_MAX+1];
3275	u32 table_id = RT_TABLE_MAIN;
3276	__be16 sport = 0, dport = 0;
3277	struct fib_result res = {};
3278	u8 ip_proto = IPPROTO_UDP;
3279	struct rtable *rt = NULL;
3280	struct sk_buff *skb;
3281	struct rtmsg *rtm;
3282	struct flowi4 fl4 = {};
3283	__be32 dst = 0;
3284	__be32 src = 0;
3285	kuid_t uid;
3286	u32 iif;
3287	int err;
3288	int mark;
3289
3290	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3291	if (err < 0)
3292		return err;
3293
3294	rtm = nlmsg_data(nlh);
3295	src = nla_get_in_addr_default(tb[RTA_SRC], 0);
3296	dst = nla_get_in_addr_default(tb[RTA_DST], 0);
3297	iif = nla_get_u32_default(tb[RTA_IIF], 0);
3298	mark = nla_get_u32_default(tb[RTA_MARK], 0);
3299	if (tb[RTA_UID])
3300		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3301	else
3302		uid = (iif ? INVALID_UID : current_uid());
3303
3304	if (tb[RTA_IP_PROTO]) {
3305		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3306						  &ip_proto, AF_INET, extack);
3307		if (err)
3308			return err;
3309	}
3310
3311	if (tb[RTA_SPORT])
3312		sport = nla_get_be16(tb[RTA_SPORT]);
3313
3314	if (tb[RTA_DPORT])
3315		dport = nla_get_be16(tb[RTA_DPORT]);
3316
3317	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3318	if (!skb)
3319		return -ENOBUFS;
3320
3321	fl4.daddr = dst;
3322	fl4.saddr = src;
3323	fl4.flowi4_tos = rtm->rtm_tos & INET_DSCP_MASK;
3324	fl4.flowi4_oif = nla_get_u32_default(tb[RTA_OIF], 0);
3325	fl4.flowi4_mark = mark;
3326	fl4.flowi4_uid = uid;
3327	if (sport)
3328		fl4.fl4_sport = sport;
3329	if (dport)
3330		fl4.fl4_dport = dport;
3331	fl4.flowi4_proto = ip_proto;
3332
3333	rcu_read_lock();
3334
3335	if (iif) {
3336		struct net_device *dev;
3337
3338		dev = dev_get_by_index_rcu(net, iif);
3339		if (!dev) {
3340			err = -ENODEV;
3341			goto errout_rcu;
3342		}
3343
3344		fl4.flowi4_iif = iif; /* for rt_fill_info */
3345		skb->dev	= dev;
3346		skb->mark	= mark;
3347		err = ip_route_input_rcu(skb, dst, src,
3348					 inet_dsfield_to_dscp(rtm->rtm_tos),
3349					 dev, &res) ? -EINVAL : 0;
3350
3351		rt = skb_rtable(skb);
3352		if (err == 0 && rt->dst.error)
3353			err = -rt->dst.error;
3354	} else {
3355		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3356		skb->dev = net->loopback_dev;
3357		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3358		err = 0;
3359		if (IS_ERR(rt))
3360			err = PTR_ERR(rt);
3361		else
3362			skb_dst_set(skb, &rt->dst);
3363	}
3364
3365	if (err)
3366		goto errout_rcu;
3367
3368	if (rtm->rtm_flags & RTM_F_NOTIFY)
3369		rt->rt_flags |= RTCF_NOTIFY;
3370
3371	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3372		table_id = res.table ? res.table->tb_id : 0;
3373
3374	/* reset skb for netlink reply msg */
3375	skb_trim(skb, 0);
3376	skb_reset_network_header(skb);
3377	skb_reset_transport_header(skb);
3378	skb_reset_mac_header(skb);
3379
3380	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3381		struct fib_rt_info fri;
3382
3383		if (!res.fi) {
3384			err = fib_props[res.type].error;
3385			if (!err)
3386				err = -EHOSTUNREACH;
3387			goto errout_rcu;
3388		}
3389		fri.fi = res.fi;
3390		fri.tb_id = table_id;
3391		fri.dst = res.prefix;
3392		fri.dst_len = res.prefixlen;
3393		fri.dscp = res.dscp;
3394		fri.type = rt->rt_type;
3395		fri.offload = 0;
3396		fri.trap = 0;
3397		fri.offload_failed = 0;
3398		if (res.fa_head) {
3399			struct fib_alias *fa;
3400
3401			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3402				u8 slen = 32 - fri.dst_len;
3403
3404				if (fa->fa_slen == slen &&
3405				    fa->tb_id == fri.tb_id &&
3406				    fa->fa_dscp == fri.dscp &&
3407				    fa->fa_info == res.fi &&
3408				    fa->fa_type == fri.type) {
3409					fri.offload = READ_ONCE(fa->offload);
3410					fri.trap = READ_ONCE(fa->trap);
3411					fri.offload_failed =
3412						READ_ONCE(fa->offload_failed);
3413					break;
3414				}
3415			}
3416		}
3417		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3418				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3419	} else {
3420		err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4,
3421				   skb, NETLINK_CB(in_skb).portid,
3422				   nlh->nlmsg_seq, 0);
3423	}
3424	if (err < 0)
3425		goto errout_rcu;
3426
3427	rcu_read_unlock();
3428
3429	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3430
3431errout_free:
3432	return err;
3433errout_rcu:
3434	rcu_read_unlock();
3435	kfree_skb(skb);
3436	goto errout_free;
3437}
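/* The userspace counterpart of this handler, as a usage sketch
 * (addresses and device names are illustrative):
 *
 *	ip route get 198.51.100.1 from 192.0.2.1 iif eth0
 *	ip route get 198.51.100.1 fibmatch
 *
 * iproute2 encodes these as RTM_GETROUTE requests; "fibmatch" sets
 * RTM_F_FIB_MATCH so the matching FIB entry is returned instead of the
 * resolved route.
 */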
3438
3439void ip_rt_multicast_event(struct in_device *in_dev)
3440{
3441	rt_cache_flush(dev_net(in_dev->dev));
3442}
3443
3444#ifdef CONFIG_SYSCTL
3445static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3446static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3447static int ip_rt_gc_elasticity __read_mostly	= 8;
3448static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3449
3450static int ipv4_sysctl_rtcache_flush(const struct ctl_table *__ctl, int write,
3451		void *buffer, size_t *lenp, loff_t *ppos)
3452{
3453	struct net *net = (struct net *)__ctl->extra1;
3454
3455	if (write) {
3456		rt_cache_flush(net);
3457		fnhe_genid_bump(net);
3458		return 0;
3459	}
3460
3461	return -EINVAL;
3462}
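/* Usage sketch: any write to this file flushes the routing cache and
 * bumps the fnhe generation id, invalidating cached exceptions, e.g.
 *
 *	sysctl -w net.ipv4.route.flush=1
 *	# equivalently: echo 1 > /proc/sys/net/ipv4/route/flush
 */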
3463
3464static struct ctl_table ipv4_route_table[] = {
3465	{
3466		.procname	= "gc_thresh",
3467		.data		= &ipv4_dst_ops.gc_thresh,
3468		.maxlen		= sizeof(int),
3469		.mode		= 0644,
3470		.proc_handler	= proc_dointvec,
3471	},
3472	{
3473		.procname	= "max_size",
3474		.data		= &ip_rt_max_size,
3475		.maxlen		= sizeof(int),
3476		.mode		= 0644,
3477		.proc_handler	= proc_dointvec,
3478	},
3479	{
3480		/*  Deprecated. Use gc_min_interval_ms */
3481
3482		.procname	= "gc_min_interval",
3483		.data		= &ip_rt_gc_min_interval,
3484		.maxlen		= sizeof(int),
3485		.mode		= 0644,
3486		.proc_handler	= proc_dointvec_jiffies,
3487	},
3488	{
3489		.procname	= "gc_min_interval_ms",
3490		.data		= &ip_rt_gc_min_interval,
3491		.maxlen		= sizeof(int),
3492		.mode		= 0644,
3493		.proc_handler	= proc_dointvec_ms_jiffies,
3494	},
3495	{
3496		.procname	= "gc_timeout",
3497		.data		= &ip_rt_gc_timeout,
3498		.maxlen		= sizeof(int),
3499		.mode		= 0644,
3500		.proc_handler	= proc_dointvec_jiffies,
3501	},
3502	{
3503		.procname	= "gc_interval",
3504		.data		= &ip_rt_gc_interval,
3505		.maxlen		= sizeof(int),
3506		.mode		= 0644,
3507		.proc_handler	= proc_dointvec_jiffies,
3508	},
3509	{
3510		.procname	= "redirect_load",
3511		.data		= &ip_rt_redirect_load,
3512		.maxlen		= sizeof(int),
3513		.mode		= 0644,
3514		.proc_handler	= proc_dointvec,
3515	},
3516	{
3517		.procname	= "redirect_number",
3518		.data		= &ip_rt_redirect_number,
3519		.maxlen		= sizeof(int),
3520		.mode		= 0644,
3521		.proc_handler	= proc_dointvec,
3522	},
3523	{
3524		.procname	= "redirect_silence",
3525		.data		= &ip_rt_redirect_silence,
3526		.maxlen		= sizeof(int),
3527		.mode		= 0644,
3528		.proc_handler	= proc_dointvec,
3529	},
3530	{
3531		.procname	= "error_cost",
3532		.data		= &ip_rt_error_cost,
3533		.maxlen		= sizeof(int),
3534		.mode		= 0644,
3535		.proc_handler	= proc_dointvec,
3536	},
3537	{
3538		.procname	= "error_burst",
3539		.data		= &ip_rt_error_burst,
3540		.maxlen		= sizeof(int),
3541		.mode		= 0644,
3542		.proc_handler	= proc_dointvec,
3543	},
3544	{
3545		.procname	= "gc_elasticity",
3546		.data		= &ip_rt_gc_elasticity,
3547		.maxlen		= sizeof(int),
3548		.mode		= 0644,
3549		.proc_handler	= proc_dointvec,
3550		.proc_handler	= proc_dointvec,
3551	};
3552
3553static const char ipv4_route_flush_procname[] = "flush";
3554
3555static struct ctl_table ipv4_route_netns_table[] = {
3556	{
3557		.procname	= ipv4_route_flush_procname,
3558		.maxlen		= sizeof(int),
3559		.mode		= 0200,
3560		.proc_handler	= ipv4_sysctl_rtcache_flush,
3561	},
3562	{
3563		.procname       = "min_pmtu",
3564		.data           = &init_net.ipv4.ip_rt_min_pmtu,
3565		.maxlen         = sizeof(int),
3566		.mode           = 0644,
3567		.proc_handler   = proc_dointvec_minmax,
3568		.extra1         = &ip_min_valid_pmtu,
3569	},
3570	{
3571		.procname       = "mtu_expires",
3572		.data           = &init_net.ipv4.ip_rt_mtu_expires,
3573		.maxlen         = sizeof(int),
3574		.mode           = 0644,
3575		.proc_handler   = proc_dointvec_jiffies,
3576	},
3577	{
3578		.procname   = "min_adv_mss",
3579		.data       = &init_net.ipv4.ip_rt_min_advmss,
3580		.maxlen     = sizeof(int),
3581		.mode       = 0644,
3582		.proc_handler   = proc_dointvec,
3583	},
3584};
3585
3586static __net_init int sysctl_route_net_init(struct net *net)
3587{
3588	struct ctl_table *tbl;
3589	size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);
3590
3591	tbl = ipv4_route_netns_table;
3592	if (!net_eq(net, &init_net)) {
3593		int i;
3594
3595		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3596		if (!tbl)
3597			goto err_dup;
3598
3599		/* Don't export non-whitelisted sysctls to unprivileged users */
3600		if (net->user_ns != &init_user_ns) {
3601			if (tbl[0].procname != ipv4_route_flush_procname)
3602				table_size = 0;
3603		}
3604
3605		/* Update the variables to point into the current struct net,
3606		 * except for the first element, "flush".
3607		 */
3608		for (i = 1; i < table_size; i++)
3609			tbl[i].data += (void *)net - (void *)&init_net;
3610	}
3611	tbl[0].extra1 = net;
3612
3613	net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
3614						     tbl, table_size);
3615	if (!net->ipv4.route_hdr)
3616		goto err_reg;
3617	return 0;
3618
3619err_reg:
3620	if (tbl != ipv4_route_netns_table)
3621		kfree(tbl);
3622err_dup:
3623	return -ENOMEM;
3624}
3625
3626static __net_exit void sysctl_route_net_exit(struct net *net)
3627{
3628	const struct ctl_table *tbl;
3629
3630	tbl = net->ipv4.route_hdr->ctl_table_arg;
3631	unregister_net_sysctl_table(net->ipv4.route_hdr);
3632	BUG_ON(tbl == ipv4_route_netns_table);
3633	kfree(tbl);
3634}
3635
3636static __net_initdata struct pernet_operations sysctl_route_ops = {
3637	.init = sysctl_route_net_init,
3638	.exit = sysctl_route_net_exit,
3639};
3640#endif
3641
3642static __net_init int netns_ip_rt_init(struct net *net)
3643{
3644	/* Set default value for namespaceified sysctls */
3645	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
3646	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
3647	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
3648	return 0;
3649}
3650
3651static struct pernet_operations __net_initdata ip_rt_ops = {
3652	.init = netns_ip_rt_init,
3653};
3654
3655static __net_init int rt_genid_init(struct net *net)
3656{
3657	atomic_set(&net->ipv4.rt_genid, 0);
3658	atomic_set(&net->fnhe_genid, 0);
3659	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
3660	return 0;
3661}
3662
3663static __net_initdata struct pernet_operations rt_genid_ops = {
3664	.init = rt_genid_init,
3665};
3666
3667static int __net_init ipv4_inetpeer_init(struct net *net)
3668{
3669	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3670
3671	if (!bp)
3672		return -ENOMEM;
3673	inet_peer_base_init(bp);
3674	net->ipv4.peers = bp;
3675	return 0;
3676}
3677
3678static void __net_exit ipv4_inetpeer_exit(struct net *net)
3679{
3680	struct inet_peer_base *bp = net->ipv4.peers;
3681
3682	net->ipv4.peers = NULL;
3683	inetpeer_invalidate_tree(bp);
3684	kfree(bp);
3685}
3686
3687static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3688	.init	=	ipv4_inetpeer_init,
3689	.exit	=	ipv4_inetpeer_exit,
3690};
3691
3692#ifdef CONFIG_IP_ROUTE_CLASSID
3693struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3694#endif /* CONFIG_IP_ROUTE_CLASSID */
3695
3696static const struct rtnl_msg_handler ip_rt_rtnl_msg_handlers[] __initconst = {
3697	{.protocol = PF_INET, .msgtype = RTM_GETROUTE,
3698	 .doit = inet_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
3699};
3700
3701int __init ip_rt_init(void)
3702{
3703	void *idents_hash;
3704	int cpu;
3705
3706	/* For modern hosts, this will use 2 MB of memory */
3707	idents_hash = alloc_large_system_hash("IP idents",
3708					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
3709					      0,
3710					      16, /* one bucket per 64 KB */
3711					      HASH_ZERO,
3712					      NULL,
3713					      &ip_idents_mask,
3714					      2048,
3715					      256*1024);
3716
3717	ip_idents = idents_hash;
3718
3719	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3720
3721	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3722
3723	for_each_possible_cpu(cpu) {
3724		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3725
3726		INIT_LIST_HEAD(&ul->head);
3727		spin_lock_init(&ul->lock);
3728	}
3729#ifdef CONFIG_IP_ROUTE_CLASSID
3730	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3731	if (!ip_rt_acct)
3732		panic("IP: failed to allocate ip_rt_acct\n");
3733#endif
3734
3735	ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
3736					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3737
3738	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3739
3740	if (dst_entries_init(&ipv4_dst_ops) < 0)
3741		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3742
3743	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3744		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3745
3746	ipv4_dst_ops.gc_thresh = ~0;
3747	ip_rt_max_size = INT_MAX;
3748
3749	devinet_init();
3750	ip_fib_init();
3751
3752	if (ip_rt_proc_init())
3753		pr_err("Unable to create route proc files\n");
3754#ifdef CONFIG_XFRM
3755	xfrm_init();
3756	xfrm4_init();
3757#endif
3758	rtnl_register_many(ip_rt_rtnl_msg_handlers);
3759
3760#ifdef CONFIG_SYSCTL
3761	register_pernet_subsys(&sysctl_route_ops);
3762#endif
3763	register_pernet_subsys(&ip_rt_ops);
3764	register_pernet_subsys(&rt_genid_ops);
3765	register_pernet_subsys(&ipv4_inetpeer_ops);
3766	return 0;
3767}
3768
3769#ifdef CONFIG_SYSCTL
3770/*
3771 * We really need to sanitize the damn ipv4 init order, then all
3772 * this nonsense will go away.
3773 */
3774void __init ip_static_sysctl_init(void)
3775{
3776	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3777}
3778#endif
v5.14.15
  61#define pr_fmt(fmt) "IPv4: " fmt
  62
  63#include <linux/module.h>
  64#include <linux/uaccess.h>
  65#include <linux/bitops.h>
  66#include <linux/types.h>
  67#include <linux/kernel.h>
  68#include <linux/mm.h>
  69#include <linux/memblock.h>
  70#include <linux/string.h>
  71#include <linux/socket.h>
  72#include <linux/sockios.h>
  73#include <linux/errno.h>
  74#include <linux/in.h>
  75#include <linux/inet.h>
  76#include <linux/netdevice.h>
  77#include <linux/proc_fs.h>
  78#include <linux/init.h>
  79#include <linux/skbuff.h>
  80#include <linux/inetdevice.h>
  81#include <linux/igmp.h>
  82#include <linux/pkt_sched.h>
  83#include <linux/mroute.h>
  84#include <linux/netfilter_ipv4.h>
  85#include <linux/random.h>
  86#include <linux/rcupdate.h>
  87#include <linux/times.h>
  88#include <linux/slab.h>
  89#include <linux/jhash.h>
  90#include <net/dst.h>
  91#include <net/dst_metadata.h>
  92#include <net/net_namespace.h>
  93#include <net/protocol.h>
  94#include <net/ip.h>
  95#include <net/route.h>
  96#include <net/inetpeer.h>
  97#include <net/sock.h>
  98#include <net/ip_fib.h>
  99#include <net/nexthop.h>
 100#include <net/arp.h>
 101#include <net/tcp.h>
 102#include <net/icmp.h>
 103#include <net/xfrm.h>
 104#include <net/lwtunnel.h>
 105#include <net/netevent.h>
 106#include <net/rtnetlink.h>
 107#ifdef CONFIG_SYSCTL
 108#include <linux/sysctl.h>
 109#endif
 110#include <net/secure_seq.h>
 111#include <net/ip_tunnels.h>
 112#include <net/l3mdev.h>
 113
 114#include "fib_lookup.h"
 115
 116#define RT_FL_TOS(oldflp4) \
 117	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 118
 119#define RT_GC_TIMEOUT (300*HZ)
 120
 121static int ip_rt_max_size;
 122static int ip_rt_redirect_number __read_mostly	= 9;
 123static int ip_rt_redirect_load __read_mostly	= HZ / 50;
 124static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
 125static int ip_rt_error_cost __read_mostly	= HZ;
 126static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 127static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
 128static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
 129static int ip_rt_min_advmss __read_mostly	= 256;
 130
 131static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
 132
 133/*
 134 *	Interface to generic destination cache.
 135 */
 136
 137INDIRECT_CALLABLE_SCOPE
 138struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 139static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
 140INDIRECT_CALLABLE_SCOPE
 141unsigned int		ipv4_mtu(const struct dst_entry *dst);
 142static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 143static void		 ipv4_link_failure(struct sk_buff *skb);
 144static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 145					   struct sk_buff *skb, u32 mtu,
 146					   bool confirm_neigh);
 147static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
 148					struct sk_buff *skb);
 149static void		ipv4_dst_destroy(struct dst_entry *dst);
 150
 151static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 152{
 153	WARN_ON(1);
 154	return NULL;
 155}
 156
 157static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 158					   struct sk_buff *skb,
 159					   const void *daddr);
 160static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
 161
 162static struct dst_ops ipv4_dst_ops = {
 163	.family =		AF_INET,
 164	.check =		ipv4_dst_check,
 165	.default_advmss =	ipv4_default_advmss,
 166	.mtu =			ipv4_mtu,
 167	.cow_metrics =		ipv4_cow_metrics,
 168	.destroy =		ipv4_dst_destroy,
 169	.negative_advice =	ipv4_negative_advice,
 170	.link_failure =		ipv4_link_failure,
 171	.update_pmtu =		ip_rt_update_pmtu,
 172	.redirect =		ip_do_redirect,
 173	.local_out =		__ip_local_out,
 174	.neigh_lookup =		ipv4_neigh_lookup,
 175	.confirm_neigh =	ipv4_confirm_neigh,
 176};
 177
 178#define ECN_OR_COST(class)	TC_PRIO_##class
 179
 180const __u8 ip_tos2prio[16] = {
 181	TC_PRIO_BESTEFFORT,
 182	ECN_OR_COST(BESTEFFORT),
 183	TC_PRIO_BESTEFFORT,
 184	ECN_OR_COST(BESTEFFORT),
 185	TC_PRIO_BULK,
 186	ECN_OR_COST(BULK),
 187	TC_PRIO_BULK,
 188	ECN_OR_COST(BULK),
 189	TC_PRIO_INTERACTIVE,
 190	ECN_OR_COST(INTERACTIVE),
 191	TC_PRIO_INTERACTIVE,
 192	ECN_OR_COST(INTERACTIVE),
 193	TC_PRIO_INTERACTIVE_BULK,
 194	ECN_OR_COST(INTERACTIVE_BULK),
 195	TC_PRIO_INTERACTIVE_BULK,
 196	ECN_OR_COST(INTERACTIVE_BULK)
 197};
 198EXPORT_SYMBOL(ip_tos2prio);
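/* Editor's illustrative sketch — not part of this file.  The table above
 * is consumed by rt_tos2priority() in include/net/route.h, which indexes
 * ip_tos2prio with the four TOS bits shifted down by one.  A standalone
 * userspace check of one mapping (IPTOS_* values restated here so the
 * sketch compiles on its own):
 */
#include <stdio.h>

#define IPTOS_TOS_MASK	0x1E
#define IPTOS_TOS(tos)	((tos) & IPTOS_TOS_MASK)
#define IPTOS_LOWDELAY	0x10

int main(void)
{
	unsigned char tos = IPTOS_LOWDELAY;
	unsigned int idx = IPTOS_TOS(tos) >> 1;	/* 0x10 -> index 8 */

	/* entry 8 of ip_tos2prio above is TC_PRIO_INTERACTIVE */
	printf("tos 0x%02x -> ip_tos2prio[%u] = TC_PRIO_INTERACTIVE\n",
	       tos, idx);
	return 0;
}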
 199
 200static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 201#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
 202
 203#ifdef CONFIG_PROC_FS
 204static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 205{
 206	if (*pos)
 207		return NULL;
 208	return SEQ_START_TOKEN;
 209}
 210
 211static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 212{
 213	++*pos;
 214	return NULL;
 215}
 216
 217static void rt_cache_seq_stop(struct seq_file *seq, void *v)
 218{
 219}
 220
 221static int rt_cache_seq_show(struct seq_file *seq, void *v)
 222{
 223	if (v == SEQ_START_TOKEN)
 224		seq_printf(seq, "%-127s\n",
 225			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
 226			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
 227			   "HHUptod\tSpecDst");
 228	return 0;
 229}
 230
 231static const struct seq_operations rt_cache_seq_ops = {
 232	.start  = rt_cache_seq_start,
 233	.next   = rt_cache_seq_next,
 234	.stop   = rt_cache_seq_stop,
 235	.show   = rt_cache_seq_show,
 236};
 237
 238static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 239{
 240	int cpu;
 241
 242	if (*pos == 0)
 243		return SEQ_START_TOKEN;
 244
 245	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 246		if (!cpu_possible(cpu))
 247			continue;
 248		*pos = cpu+1;
 249		return &per_cpu(rt_cache_stat, cpu);
 250	}
 251	return NULL;
 252}
 253
 254static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 255{
 256	int cpu;
 257
 258	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 259		if (!cpu_possible(cpu))
 260			continue;
 261		*pos = cpu+1;
 262		return &per_cpu(rt_cache_stat, cpu);
 263	}
 264	(*pos)++;
 265	return NULL;
 266
 267}
 268
 269static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
 270{
 271
 272}
 273
 274static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 275{
 276	struct rt_cache_stat *st = v;
 277
 278	if (v == SEQ_START_TOKEN) {
 279		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 280		return 0;
 281	}
 282
  283	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
 284		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
 285		   dst_entries_get_slow(&ipv4_dst_ops),
 286		   0, /* st->in_hit */
 287		   st->in_slow_tot,
 288		   st->in_slow_mc,
 289		   st->in_no_route,
 290		   st->in_brd,
 291		   st->in_martian_dst,
 292		   st->in_martian_src,
 293
 294		   0, /* st->out_hit */
 295		   st->out_slow_tot,
 296		   st->out_slow_mc,
 297
 298		   0, /* st->gc_total */
 299		   0, /* st->gc_ignored */
 300		   0, /* st->gc_goal_miss */
 301		   0, /* st->gc_dst_overflow */
 302		   0, /* st->in_hlist_search */
 303		   0  /* st->out_hlist_search */
 304		);
 305	return 0;
 306}
 307
 308static const struct seq_operations rt_cpu_seq_ops = {
 309	.start  = rt_cpu_seq_start,
 310	.next   = rt_cpu_seq_next,
 311	.stop   = rt_cpu_seq_stop,
 312	.show   = rt_cpu_seq_show,
 313};
 314
 315#ifdef CONFIG_IP_ROUTE_CLASSID
 316static int rt_acct_proc_show(struct seq_file *m, void *v)
 317{
 318	struct ip_rt_acct *dst, *src;
 319	unsigned int i, j;
 320
 321	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
 322	if (!dst)
 323		return -ENOMEM;
 324
 325	for_each_possible_cpu(i) {
 326		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
 327		for (j = 0; j < 256; j++) {
 328			dst[j].o_bytes   += src[j].o_bytes;
 329			dst[j].o_packets += src[j].o_packets;
 330			dst[j].i_bytes   += src[j].i_bytes;
 331			dst[j].i_packets += src[j].i_packets;
 332		}
 333	}
 334
 335	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
 336	kfree(dst);
 337	return 0;
 338}
 339#endif
 340
 341static int __net_init ip_rt_do_proc_init(struct net *net)
 342{
 343	struct proc_dir_entry *pde;
 344
 345	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
 346			      &rt_cache_seq_ops);
 347	if (!pde)
 348		goto err1;
 349
 350	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
 351			      &rt_cpu_seq_ops);
 352	if (!pde)
 353		goto err2;
 354
 355#ifdef CONFIG_IP_ROUTE_CLASSID
 356	pde = proc_create_single("rt_acct", 0, net->proc_net,
 357			rt_acct_proc_show);
 358	if (!pde)
 359		goto err3;
 360#endif
 361	return 0;
 362
 363#ifdef CONFIG_IP_ROUTE_CLASSID
 364err3:
 365	remove_proc_entry("rt_cache", net->proc_net_stat);
 366#endif
 367err2:
 368	remove_proc_entry("rt_cache", net->proc_net);
 369err1:
 370	return -ENOMEM;
 371}
 372
 373static void __net_exit ip_rt_do_proc_exit(struct net *net)
 374{
 375	remove_proc_entry("rt_cache", net->proc_net_stat);
 376	remove_proc_entry("rt_cache", net->proc_net);
 377#ifdef CONFIG_IP_ROUTE_CLASSID
 378	remove_proc_entry("rt_acct", net->proc_net);
 379#endif
 380}
 381
 382static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
 383	.init = ip_rt_do_proc_init,
 384	.exit = ip_rt_do_proc_exit,
 385};
 386
 387static int __init ip_rt_proc_init(void)
 388{
 389	return register_pernet_subsys(&ip_rt_proc_ops);
 390}
 391
 392#else
 393static inline int ip_rt_proc_init(void)
 394{
 395	return 0;
 396}
 397#endif /* CONFIG_PROC_FS */
 398
 399static inline bool rt_is_expired(const struct rtable *rth)
 400{
 401	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
 402}
 403
 404void rt_cache_flush(struct net *net)
 405{
 406	rt_genid_bump_ipv4(net);
 407}
 408
 409static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 410					   struct sk_buff *skb,
 411					   const void *daddr)
 412{
 413	const struct rtable *rt = container_of(dst, struct rtable, dst);
 414	struct net_device *dev = dst->dev;
 415	struct neighbour *n;
 416
 417	rcu_read_lock_bh();
 418
 419	if (likely(rt->rt_gw_family == AF_INET)) {
 420		n = ip_neigh_gw4(dev, rt->rt_gw4);
 421	} else if (rt->rt_gw_family == AF_INET6) {
 422		n = ip_neigh_gw6(dev, &rt->rt_gw6);
  423	} else {
 424		__be32 pkey;
 425
 426		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
 427		n = ip_neigh_gw4(dev, pkey);
 428	}
 429
 430	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
 431		n = NULL;
 432
 433	rcu_read_unlock_bh();
 434
 435	return n;
 436}
 437
 438static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 439{
 440	const struct rtable *rt = container_of(dst, struct rtable, dst);
 441	struct net_device *dev = dst->dev;
 442	const __be32 *pkey = daddr;
 443
 444	if (rt->rt_gw_family == AF_INET) {
 445		pkey = (const __be32 *)&rt->rt_gw4;
 446	} else if (rt->rt_gw_family == AF_INET6) {
 447		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
 448	} else if (!daddr ||
 449		 (rt->rt_flags &
 450		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
 451		return;
 452	}
 453	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
 454}
 455
 456/* Hash tables of size 2048..262144 depending on RAM size.
 457 * Each bucket uses 8 bytes.
 458 */
 459static u32 ip_idents_mask __read_mostly;
 460static atomic_t *ip_idents __read_mostly;
 461static u32 *ip_tstamps __read_mostly;
 462
 463/* In order to protect privacy, we add a perturbation to identifiers
  464 * if one generator is seldom used. This makes it hard for an attacker
 465 * to infer how many packets were sent between two points in time.
 466 */
 467u32 ip_idents_reserve(u32 hash, int segs)
 468{
 469	u32 bucket, old, now = (u32)jiffies;
 470	atomic_t *p_id;
 471	u32 *p_tstamp;
 472	u32 delta = 0;
 473
 474	bucket = hash & ip_idents_mask;
 475	p_tstamp = ip_tstamps + bucket;
 476	p_id = ip_idents + bucket;
 477	old = READ_ONCE(*p_tstamp);
 478
 479	if (old != now && cmpxchg(p_tstamp, old, now) == old)
 480		delta = prandom_u32_max(now - old);
 481
  482	/* If UBSAN reports an error there, please make sure your compiler
  483	 * supports -fno-strict-overflow before reporting it: that was a bug
  484	 * in UBSAN, and it has been fixed in GCC-8.
 485	 */
 486	return atomic_add_return(segs + delta, p_id) - segs;
 487}
 488EXPORT_SYMBOL(ip_idents_reserve);
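/* Editor's illustrative sketch — not kernel code.  A userspace model of
 * the ip_idents_reserve() idea above, with a hypothetical fixed-size
 * table and time()/rand() standing in for jiffies and prandom_u32_max().
 * A bucket that was idle for N ticks gets a random extra increment of up
 * to N, so consecutive IDs do not reveal how many packets were sent in
 * between.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define BUCKETS 2048			/* power of two, like the kernel */

static atomic_uint id_table[BUCKETS];	/* last ID handed out per bucket */
static atomic_uint ts_table[BUCKETS];	/* last-use time per bucket */

static uint32_t idents_reserve(uint32_t hash, uint32_t segs)
{
	uint32_t b = hash & (BUCKETS - 1);
	unsigned int now = (unsigned int)time(NULL);
	unsigned int old = atomic_load(&ts_table[b]);
	uint32_t delta = 0;

	/* only the thread that wins the timestamp update adds jitter */
	if (old != now &&
	    atomic_compare_exchange_strong(&ts_table[b], &old, now))
		delta = (uint32_t)rand() % (now - old);

	/* reserve 'segs' IDs; return the first one, as the kernel does */
	return atomic_fetch_add(&id_table[b], segs + delta) + delta;
}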
 489
 490void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 491{
 492	u32 hash, id;
 493
 494	/* Note the following code is not safe, but this is okay. */
 495	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
 496		get_random_bytes(&net->ipv4.ip_id_key,
 497				 sizeof(net->ipv4.ip_id_key));
 498
 499	hash = siphash_3u32((__force u32)iph->daddr,
 500			    (__force u32)iph->saddr,
 501			    iph->protocol,
 502			    &net->ipv4.ip_id_key);
 503	id = ip_idents_reserve(hash, segs);
 504	iph->id = htons(id);
 505}
 506EXPORT_SYMBOL(__ip_select_ident);
 507
 508static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
 509			     const struct sock *sk,
 510			     const struct iphdr *iph,
 511			     int oif, u8 tos,
 512			     u8 prot, u32 mark, int flow_flags)
 513{
 514	if (sk) {
 515		const struct inet_sock *inet = inet_sk(sk);
 516
 517		oif = sk->sk_bound_dev_if;
 518		mark = sk->sk_mark;
 519		tos = RT_CONN_FLAGS(sk);
 520		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
 521	}
 522	flowi4_init_output(fl4, oif, mark, tos,
 523			   RT_SCOPE_UNIVERSE, prot,
 524			   flow_flags,
 525			   iph->daddr, iph->saddr, 0, 0,
 526			   sock_net_uid(net, sk));
 527}
 528
 529static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 530			       const struct sock *sk)
 531{
 532	const struct net *net = dev_net(skb->dev);
 533	const struct iphdr *iph = ip_hdr(skb);
 534	int oif = skb->dev->ifindex;
 535	u8 tos = RT_TOS(iph->tos);
 536	u8 prot = iph->protocol;
 537	u32 mark = skb->mark;
 538
 539	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
 540}
 541
 542static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
 543{
 544	const struct inet_sock *inet = inet_sk(sk);
 545	const struct ip_options_rcu *inet_opt;
 546	__be32 daddr = inet->inet_daddr;
 547
 548	rcu_read_lock();
 549	inet_opt = rcu_dereference(inet->inet_opt);
 550	if (inet_opt && inet_opt->opt.srr)
 551		daddr = inet_opt->opt.faddr;
 552	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 553			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 554			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 555			   inet_sk_flowi_flags(sk),
 556			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
 557	rcu_read_unlock();
 558}
 559
 560static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 561				 const struct sk_buff *skb)
 562{
 563	if (skb)
 564		build_skb_flow_key(fl4, skb, sk);
 565	else
 566		build_sk_flow_key(fl4, sk);
 567}
 568
 569static DEFINE_SPINLOCK(fnhe_lock);
 570
 571static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
 572{
 573	struct rtable *rt;
 574
 575	rt = rcu_dereference(fnhe->fnhe_rth_input);
 576	if (rt) {
 577		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
 578		dst_dev_put(&rt->dst);
 579		dst_release(&rt->dst);
 580	}
 581	rt = rcu_dereference(fnhe->fnhe_rth_output);
 582	if (rt) {
 583		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
 584		dst_dev_put(&rt->dst);
 585		dst_release(&rt->dst);
 586	}
 587}
 588
 589static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
 590{
 591	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
 592	struct fib_nh_exception *fnhe, *oldest = NULL;
 593
 594	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
 595		fnhe = rcu_dereference_protected(*fnhe_p,
 596						 lockdep_is_held(&fnhe_lock));
 597		if (!fnhe)
 598			break;
 599		if (!oldest ||
 600		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
 601			oldest = fnhe;
 602			oldest_p = fnhe_p;
 603		}
 604	}
 605	fnhe_flush_routes(oldest);
 606	*oldest_p = oldest->fnhe_next;
 607	kfree_rcu(oldest, rcu);
 608}
 609
 610static u32 fnhe_hashfun(__be32 daddr)
 611{
 612	static siphash_key_t fnhe_hash_key __read_mostly;
 613	u64 hval;
 614
 615	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
 616	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
 617	return hash_64(hval, FNHE_HASH_SHIFT);
 618}
 619
 620static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
 621{
 622	rt->rt_pmtu = fnhe->fnhe_pmtu;
 623	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
 624	rt->dst.expires = fnhe->fnhe_expires;
 625
 626	if (fnhe->fnhe_gw) {
 627		rt->rt_flags |= RTCF_REDIRECTED;
 628		rt->rt_uses_gateway = 1;
 629		rt->rt_gw_family = AF_INET;
 630		rt->rt_gw4 = fnhe->fnhe_gw;
 631	}
 632}
 633
 634static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
 635				  __be32 gw, u32 pmtu, bool lock,
 636				  unsigned long expires)
 637{
 638	struct fnhe_hash_bucket *hash;
 639	struct fib_nh_exception *fnhe;
 640	struct rtable *rt;
 641	u32 genid, hval;
 642	unsigned int i;
 643	int depth;
 644
 645	genid = fnhe_genid(dev_net(nhc->nhc_dev));
 646	hval = fnhe_hashfun(daddr);
 647
 648	spin_lock_bh(&fnhe_lock);
 649
 650	hash = rcu_dereference(nhc->nhc_exceptions);
 651	if (!hash) {
 652		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
 653		if (!hash)
 654			goto out_unlock;
 655		rcu_assign_pointer(nhc->nhc_exceptions, hash);
 656	}
 657
 658	hash += hval;
 659
 660	depth = 0;
 661	for (fnhe = rcu_dereference(hash->chain); fnhe;
 662	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
 663		if (fnhe->fnhe_daddr == daddr)
 664			break;
 665		depth++;
 666	}
 667
 668	if (fnhe) {
 669		if (fnhe->fnhe_genid != genid)
 670			fnhe->fnhe_genid = genid;
 671		if (gw)
 672			fnhe->fnhe_gw = gw;
 673		if (pmtu) {
 674			fnhe->fnhe_pmtu = pmtu;
 675			fnhe->fnhe_mtu_locked = lock;
 676		}
 677		fnhe->fnhe_expires = max(1UL, expires);
 678		/* Update all cached dsts too */
 679		rt = rcu_dereference(fnhe->fnhe_rth_input);
 680		if (rt)
 681			fill_route_from_fnhe(rt, fnhe);
 682		rt = rcu_dereference(fnhe->fnhe_rth_output);
 683		if (rt)
 684			fill_route_from_fnhe(rt, fnhe);
 685	} else {
  686		/* Randomize max depth to avoid some side-channel attacks. */
 687		int max_depth = FNHE_RECLAIM_DEPTH +
 688				prandom_u32_max(FNHE_RECLAIM_DEPTH);
 689
 690		while (depth > max_depth) {
 691			fnhe_remove_oldest(hash);
 692			depth--;
 693		}
 694
 695		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
 696		if (!fnhe)
 697			goto out_unlock;
 698
 699		fnhe->fnhe_next = hash->chain;
 700
 701		fnhe->fnhe_genid = genid;
 702		fnhe->fnhe_daddr = daddr;
 703		fnhe->fnhe_gw = gw;
 704		fnhe->fnhe_pmtu = pmtu;
 705		fnhe->fnhe_mtu_locked = lock;
 706		fnhe->fnhe_expires = max(1UL, expires);
 707
 708		rcu_assign_pointer(hash->chain, fnhe);
 709
 710		/* Exception created; mark the cached routes for the nexthop
 711		 * stale, so anyone caching it rechecks if this exception
 712		 * applies to them.
 713		 */
 714		rt = rcu_dereference(nhc->nhc_rth_input);
 715		if (rt)
 716			rt->dst.obsolete = DST_OBSOLETE_KILL;
 717
 718		for_each_possible_cpu(i) {
 719			struct rtable __rcu **prt;
 720
 721			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
 722			rt = rcu_dereference(*prt);
 723			if (rt)
 724				rt->dst.obsolete = DST_OBSOLETE_KILL;
 725		}
 726	}
 727
 728	fnhe->fnhe_stamp = jiffies;
 729
 730out_unlock:
 731	spin_unlock_bh(&fnhe_lock);
 732}
 733
 734static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
 735			     bool kill_route)
 736{
 737	__be32 new_gw = icmp_hdr(skb)->un.gateway;
 738	__be32 old_gw = ip_hdr(skb)->saddr;
 739	struct net_device *dev = skb->dev;
 740	struct in_device *in_dev;
 741	struct fib_result res;
 742	struct neighbour *n;
 743	struct net *net;
 744
 745	switch (icmp_hdr(skb)->code & 7) {
 746	case ICMP_REDIR_NET:
 747	case ICMP_REDIR_NETTOS:
 748	case ICMP_REDIR_HOST:
 749	case ICMP_REDIR_HOSTTOS:
 750		break;
 751
 752	default:
 753		return;
 754	}
 755
 756	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
 757		return;
 758
 759	in_dev = __in_dev_get_rcu(dev);
 760	if (!in_dev)
 761		return;
 762
 763	net = dev_net(dev);
 764	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
 765	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
 766	    ipv4_is_zeronet(new_gw))
 767		goto reject_redirect;
 768
 769	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
 770		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
 771			goto reject_redirect;
 772		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
 773			goto reject_redirect;
 774	} else {
 775		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
 776			goto reject_redirect;
 777	}
 778
 779	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
 780	if (!n)
 781		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 782	if (!IS_ERR(n)) {
 783		if (!(n->nud_state & NUD_VALID)) {
 784			neigh_event_send(n, NULL);
 785		} else {
 786			if (fib_lookup(net, fl4, &res, 0) == 0) {
 787				struct fib_nh_common *nhc;
 788
 789				fib_select_path(net, &res, fl4, skb);
 790				nhc = FIB_RES_NHC(res);
 791				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
 792						0, false,
 793						jiffies + ip_rt_gc_timeout);
 794			}
 795			if (kill_route)
 796				rt->dst.obsolete = DST_OBSOLETE_KILL;
 797			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 798		}
 799		neigh_release(n);
 800	}
 801	return;
 802
 803reject_redirect:
 804#ifdef CONFIG_IP_ROUTE_VERBOSE
 805	if (IN_DEV_LOG_MARTIANS(in_dev)) {
 806		const struct iphdr *iph = (const struct iphdr *) skb->data;
 807		__be32 daddr = iph->daddr;
 808		__be32 saddr = iph->saddr;
 809
 810		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
 811				     "  Advised path = %pI4 -> %pI4\n",
 812				     &old_gw, dev->name, &new_gw,
 813				     &saddr, &daddr);
 814	}
 815#endif
 816	;
 817}
 818
 819static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 820{
 821	struct rtable *rt;
 822	struct flowi4 fl4;
 823	const struct iphdr *iph = (const struct iphdr *) skb->data;
 824	struct net *net = dev_net(skb->dev);
 825	int oif = skb->dev->ifindex;
 826	u8 tos = RT_TOS(iph->tos);
 827	u8 prot = iph->protocol;
 828	u32 mark = skb->mark;
 829
 830	rt = (struct rtable *) dst;
 831
 832	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
 833	__ip_do_redirect(rt, skb, &fl4, true);
 834}
 835
 836static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 837{
 838	struct rtable *rt = (struct rtable *)dst;
 839	struct dst_entry *ret = dst;
 840
 841	if (rt) {
 842		if (dst->obsolete > 0) {
 843			ip_rt_put(rt);
 844			ret = NULL;
 845		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
 846			   rt->dst.expires) {
 847			ip_rt_put(rt);
 848			ret = NULL;
 849		}
 850	}
 851	return ret;
 852}
 853
 854/*
 855 * Algorithm:
 856 *	1. The first ip_rt_redirect_number redirects are sent
 857 *	   with exponential backoff, then we stop sending them at all,
 858 *	   assuming that the host ignores our redirects.
 859 *	2. If we did not see packets requiring redirects
 860 *	   during ip_rt_redirect_silence, we assume that the host
  861 *	   forgot the redirected route and we start sending redirects again.
 862 *
 863 * This algorithm is much cheaper and more intelligent than dumb load limiting
 864 * in icmp.c.
 865 *
 866 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 867 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 868 */
 869
 870void ip_rt_send_redirect(struct sk_buff *skb)
 871{
 872	struct rtable *rt = skb_rtable(skb);
 873	struct in_device *in_dev;
 874	struct inet_peer *peer;
 875	struct net *net;
 876	int log_martians;
 877	int vif;
 878
 879	rcu_read_lock();
 880	in_dev = __in_dev_get_rcu(rt->dst.dev);
 881	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 882		rcu_read_unlock();
 883		return;
 884	}
 885	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 886	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
 887	rcu_read_unlock();
 888
 889	net = dev_net(rt->dst.dev);
 890	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
 891	if (!peer) {
 892		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
 893			  rt_nexthop(rt, ip_hdr(skb)->daddr));
 894		return;
 895	}
 896
 897	/* No redirected packets during ip_rt_redirect_silence;
 898	 * reset the algorithm.
 899	 */
 900	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 901		peer->rate_tokens = 0;
 902		peer->n_redirects = 0;
 903	}
 904
  905	/* Too many ignored redirects; do not send anything;
  906	 * set peer->rate_last to the last seen redirected packet.
 907	 */
 908	if (peer->n_redirects >= ip_rt_redirect_number) {
 909		peer->rate_last = jiffies;
 910		goto out_put_peer;
 911	}
 912
 913	/* Check for load limit; set rate_last to the latest sent
 914	 * redirect.
 915	 */
 916	if (peer->n_redirects == 0 ||
 917	    time_after(jiffies,
 918		       (peer->rate_last +
 919			(ip_rt_redirect_load << peer->n_redirects)))) {
 920		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 921
 922		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 923		peer->rate_last = jiffies;
 924		++peer->n_redirects;
 925#ifdef CONFIG_IP_ROUTE_VERBOSE
 926		if (log_martians &&
 927		    peer->n_redirects == ip_rt_redirect_number)
 928			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 929					     &ip_hdr(skb)->saddr, inet_iif(skb),
 930					     &ip_hdr(skb)->daddr, &gw);
 931#endif
 932	}
 933out_put_peer:
 934	inet_putpeer(peer);
 935}
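/* Editor's illustrative sketch — not kernel code.  The backoff rule from
 * the "Algorithm" comment above, reduced to a standalone predicate.  The
 * peer state and second-resolution clock are hypothetical stand-ins for
 * inet_peer and jiffies; 'load', 'silence' and 'number' correspond to
 * the ip_rt_redirect_* tunables.  Redirect i is only sent after a gap of
 * load << i, and after 'number' ignored redirects we go quiet until
 * 'silence' elapses.
 */
#include <stdbool.h>
#include <stdint.h>

struct peer_state {
	uint64_t rate_last;	/* time the last redirect was sent */
	uint32_t n_redirects;	/* redirects sent since the last reset */
};

static bool redirect_allowed(struct peer_state *p, uint64_t now,
			     uint64_t load, uint64_t silence,
			     uint32_t number)
{
	if (now > p->rate_last + silence)	/* host forgot the route */
		p->n_redirects = 0;

	if (p->n_redirects >= number) {		/* host ignores us: stop */
		p->rate_last = now;
		return false;
	}

	if (p->n_redirects == 0 ||
	    now > p->rate_last + (load << p->n_redirects)) {
		p->rate_last = now;		/* exponential backoff step */
		p->n_redirects++;
		return true;
	}
	return false;
}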
 936
 937static int ip_error(struct sk_buff *skb)
 938{
 939	struct rtable *rt = skb_rtable(skb);
 940	struct net_device *dev = skb->dev;
 941	struct in_device *in_dev;
 942	struct inet_peer *peer;
 943	unsigned long now;
 944	struct net *net;
 945	bool send;
 946	int code;
 947
 948	if (netif_is_l3_master(skb->dev)) {
 949		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
 950		if (!dev)
 951			goto out;
 952	}
 953
 954	in_dev = __in_dev_get_rcu(dev);
 955
 956	/* IP on this device is disabled. */
 957	if (!in_dev)
 958		goto out;
 959
 960	net = dev_net(rt->dst.dev);
 961	if (!IN_DEV_FORWARD(in_dev)) {
 962		switch (rt->dst.error) {
 963		case EHOSTUNREACH:
 964			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 965			break;
 966
 967		case ENETUNREACH:
 968			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 969			break;
 970		}
 971		goto out;
 972	}
 973
 974	switch (rt->dst.error) {
 975	case EINVAL:
 976	default:
 977		goto out;
 978	case EHOSTUNREACH:
 979		code = ICMP_HOST_UNREACH;
 980		break;
 981	case ENETUNREACH:
 982		code = ICMP_NET_UNREACH;
 983		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 984		break;
 985	case EACCES:
 986		code = ICMP_PKT_FILTERED;
 987		break;
 988	}
 989
 990	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
 991			       l3mdev_master_ifindex(skb->dev), 1);
 992
 993	send = true;
 994	if (peer) {
 995		now = jiffies;
 996		peer->rate_tokens += now - peer->rate_last;
 997		if (peer->rate_tokens > ip_rt_error_burst)
 998			peer->rate_tokens = ip_rt_error_burst;
 999		peer->rate_last = now;
1000		if (peer->rate_tokens >= ip_rt_error_cost)
1001			peer->rate_tokens -= ip_rt_error_cost;
1002		else
1003			send = false;
1004		inet_putpeer(peer);
1005	}
1006	if (send)
1007		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1008
1009out:	kfree_skb(skb);
1010	return 0;
1011}
1012
1013static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1014{
1015	struct dst_entry *dst = &rt->dst;
1016	struct net *net = dev_net(dst->dev);
1017	struct fib_result res;
1018	bool lock = false;
1019	u32 old_mtu;
1020
1021	if (ip_mtu_locked(dst))
1022		return;
1023
1024	old_mtu = ipv4_mtu(dst);
1025	if (old_mtu < mtu)
1026		return;
1027
1028	if (mtu < ip_rt_min_pmtu) {
1029		lock = true;
1030		mtu = min(old_mtu, ip_rt_min_pmtu);
1031	}
1032
1033	if (rt->rt_pmtu == mtu && !lock &&
1034	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1035		return;
1036
1037	rcu_read_lock();
1038	if (fib_lookup(net, fl4, &res, 0) == 0) {
1039		struct fib_nh_common *nhc;
1040
1041		fib_select_path(net, &res, fl4, NULL);
1042		nhc = FIB_RES_NHC(res);
1043		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1044				      jiffies + ip_rt_mtu_expires);
1045	}
1046	rcu_read_unlock();
1047}
1048
1049static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1050			      struct sk_buff *skb, u32 mtu,
1051			      bool confirm_neigh)
1052{
1053	struct rtable *rt = (struct rtable *) dst;
1054	struct flowi4 fl4;
1055
1056	ip_rt_build_flow_key(&fl4, sk, skb);
1057
1058	/* Don't make lookup fail for bridged encapsulations */
1059	if (skb && netif_is_any_bridge_port(skb->dev))
1060		fl4.flowi4_oif = 0;
1061
1062	__ip_rt_update_pmtu(rt, &fl4, mtu);
1063}
1064
1065void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1066		      int oif, u8 protocol)
1067{
1068	const struct iphdr *iph = (const struct iphdr *)skb->data;
1069	struct flowi4 fl4;
1070	struct rtable *rt;
1071	u32 mark = IP4_REPLY_MARK(net, skb->mark);
1072
1073	__build_flow_key(net, &fl4, NULL, iph, oif,
1074			 RT_TOS(iph->tos), protocol, mark, 0);
1075	rt = __ip_route_output_key(net, &fl4);
1076	if (!IS_ERR(rt)) {
1077		__ip_rt_update_pmtu(rt, &fl4, mtu);
1078		ip_rt_put(rt);
1079	}
1080}
1081EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1082
1083static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1084{
1085	const struct iphdr *iph = (const struct iphdr *)skb->data;
1086	struct flowi4 fl4;
1087	struct rtable *rt;
1088
1089	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1090
1091	if (!fl4.flowi4_mark)
1092		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1093
1094	rt = __ip_route_output_key(sock_net(sk), &fl4);
1095	if (!IS_ERR(rt)) {
1096		__ip_rt_update_pmtu(rt, &fl4, mtu);
1097		ip_rt_put(rt);
1098	}
1099}
1100
1101void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1102{
1103	const struct iphdr *iph = (const struct iphdr *)skb->data;
1104	struct flowi4 fl4;
1105	struct rtable *rt;
1106	struct dst_entry *odst = NULL;
1107	bool new = false;
1108	struct net *net = sock_net(sk);
1109
1110	bh_lock_sock(sk);
1111
1112	if (!ip_sk_accept_pmtu(sk))
1113		goto out;
1114
1115	odst = sk_dst_get(sk);
1116
1117	if (sock_owned_by_user(sk) || !odst) {
1118		__ipv4_sk_update_pmtu(skb, sk, mtu);
1119		goto out;
1120	}
1121
1122	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1123
1124	rt = (struct rtable *)odst;
1125	if (odst->obsolete && !odst->ops->check(odst, 0)) {
1126		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1127		if (IS_ERR(rt))
1128			goto out;
1129
1130		new = true;
1131	}
1132
1133	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
1134
1135	if (!dst_check(&rt->dst, 0)) {
1136		if (new)
1137			dst_release(&rt->dst);
1138
1139		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1140		if (IS_ERR(rt))
1141			goto out;
1142
1143		new = true;
1144	}
1145
1146	if (new)
1147		sk_dst_set(sk, &rt->dst);
1148
1149out:
1150	bh_unlock_sock(sk);
1151	dst_release(odst);
1152}
1153EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1154
1155void ipv4_redirect(struct sk_buff *skb, struct net *net,
1156		   int oif, u8 protocol)
1157{
1158	const struct iphdr *iph = (const struct iphdr *)skb->data;
1159	struct flowi4 fl4;
1160	struct rtable *rt;
1161
1162	__build_flow_key(net, &fl4, NULL, iph, oif,
1163			 RT_TOS(iph->tos), protocol, 0, 0);
1164	rt = __ip_route_output_key(net, &fl4);
1165	if (!IS_ERR(rt)) {
1166		__ip_do_redirect(rt, skb, &fl4, false);
1167		ip_rt_put(rt);
1168	}
1169}
1170EXPORT_SYMBOL_GPL(ipv4_redirect);
1171
1172void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1173{
1174	const struct iphdr *iph = (const struct iphdr *)skb->data;
1175	struct flowi4 fl4;
1176	struct rtable *rt;
1177	struct net *net = sock_net(sk);
1178
1179	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1180	rt = __ip_route_output_key(net, &fl4);
1181	if (!IS_ERR(rt)) {
1182		__ip_do_redirect(rt, skb, &fl4, false);
1183		ip_rt_put(rt);
1184	}
1185}
1186EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1187
1188INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
1189							 u32 cookie)
1190{
1191	struct rtable *rt = (struct rtable *) dst;
1192
1193	/* All IPV4 dsts are created with ->obsolete set to the value
1194	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1195	 * into this function always.
1196	 *
1197	 * When a PMTU/redirect information update invalidates a route,
1198	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1199	 * DST_OBSOLETE_DEAD.
1200	 */
1201	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1202		return NULL;
1203	return dst;
1204}
1205EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
1206
1207static void ipv4_send_dest_unreach(struct sk_buff *skb)
1208{
1209	struct ip_options opt;
1210	int res;
1211
1212	/* Recompile ip options since IPCB may not be valid anymore.
1213	 * Also check we have a reasonable ipv4 header.
1214	 */
1215	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1216	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1217		return;
1218
1219	memset(&opt, 0, sizeof(opt));
1220	if (ip_hdr(skb)->ihl > 5) {
1221		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1222			return;
1223		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1224
1225		rcu_read_lock();
1226		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1227		rcu_read_unlock();
1228
1229		if (res)
1230			return;
1231	}
1232	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1233}
1234
1235static void ipv4_link_failure(struct sk_buff *skb)
1236{
1237	struct rtable *rt;
1238
1239	ipv4_send_dest_unreach(skb);
1240
1241	rt = skb_rtable(skb);
1242	if (rt)
1243		dst_set_expires(&rt->dst, 0);
1244}
1245
1246static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1247{
1248	pr_debug("%s: %pI4 -> %pI4, %s\n",
1249		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1250		 skb->dev ? skb->dev->name : "?");
1251	kfree_skb(skb);
1252	WARN_ON(1);
1253	return 0;
1254}
1255
1256/*
 1257 * We do not cache the source address of the outgoing interface,
 1258 * because it is used only by the IP RR, TS and SRR options,
 1259 * so it is out of the fast path.
 1260 *
 1261 * BTW remember: "addr" is allowed to be unaligned
1262 * in IP options!
1263 */
1264
1265void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1266{
1267	__be32 src;
1268
1269	if (rt_is_output_route(rt))
1270		src = ip_hdr(skb)->saddr;
1271	else {
1272		struct fib_result res;
1273		struct iphdr *iph = ip_hdr(skb);
1274		struct flowi4 fl4 = {
1275			.daddr = iph->daddr,
1276			.saddr = iph->saddr,
1277			.flowi4_tos = RT_TOS(iph->tos),
1278			.flowi4_oif = rt->dst.dev->ifindex,
1279			.flowi4_iif = skb->dev->ifindex,
1280			.flowi4_mark = skb->mark,
1281		};
1282
1283		rcu_read_lock();
1284		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1285			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1286		else
1287			src = inet_select_addr(rt->dst.dev,
1288					       rt_nexthop(rt, iph->daddr),
1289					       RT_SCOPE_UNIVERSE);
1290		rcu_read_unlock();
1291	}
1292	memcpy(addr, &src, 4);
1293}
1294
1295#ifdef CONFIG_IP_ROUTE_CLASSID
1296static void set_class_tag(struct rtable *rt, u32 tag)
1297{
1298	if (!(rt->dst.tclassid & 0xFFFF))
1299		rt->dst.tclassid |= tag & 0xFFFF;
1300	if (!(rt->dst.tclassid & 0xFFFF0000))
1301		rt->dst.tclassid |= tag & 0xFFFF0000;
1302}
1303#endif
1304
1305static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1306{
1307	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1308	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1309				    ip_rt_min_advmss);
1310
1311	return min(advmss, IPV4_MAX_PMTU - header_size);
1312}
1313
1314INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
1315{
1316	const struct rtable *rt = (const struct rtable *)dst;
1317	unsigned int mtu = rt->rt_pmtu;
1318
1319	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1320		mtu = dst_metric_raw(dst, RTAX_MTU);
1321
1322	if (mtu)
1323		goto out;
1324
1325	mtu = READ_ONCE(dst->dev->mtu);
1326
1327	if (unlikely(ip_mtu_locked(dst))) {
1328		if (rt->rt_uses_gateway && mtu > 576)
1329			mtu = 576;
1330	}
1331
1332out:
1333	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1334
1335	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1336}
1337EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
1338
1339static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1340{
1341	struct fnhe_hash_bucket *hash;
1342	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1343	u32 hval = fnhe_hashfun(daddr);
1344
1345	spin_lock_bh(&fnhe_lock);
1346
1347	hash = rcu_dereference_protected(nhc->nhc_exceptions,
1348					 lockdep_is_held(&fnhe_lock));
1349	hash += hval;
1350
1351	fnhe_p = &hash->chain;
1352	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1353	while (fnhe) {
1354		if (fnhe->fnhe_daddr == daddr) {
1355			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1356				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1357			/* set fnhe_daddr to 0 to ensure it won't bind with
1358			 * new dsts in rt_bind_exception().
1359			 */
1360			fnhe->fnhe_daddr = 0;
1361			fnhe_flush_routes(fnhe);
1362			kfree_rcu(fnhe, rcu);
1363			break;
1364		}
1365		fnhe_p = &fnhe->fnhe_next;
1366		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1367						 lockdep_is_held(&fnhe_lock));
1368	}
1369
1370	spin_unlock_bh(&fnhe_lock);
1371}
1372
1373static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1374					       __be32 daddr)
1375{
1376	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1377	struct fib_nh_exception *fnhe;
1378	u32 hval;
1379
1380	if (!hash)
1381		return NULL;
1382
1383	hval = fnhe_hashfun(daddr);
1384
1385	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1386	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1387		if (fnhe->fnhe_daddr == daddr) {
1388			if (fnhe->fnhe_expires &&
1389			    time_after(jiffies, fnhe->fnhe_expires)) {
1390				ip_del_fnhe(nhc, daddr);
1391				break;
1392			}
1393			return fnhe;
1394		}
1395	}
1396	return NULL;
1397}
1398
1399/* MTU selection:
1400 * 1. mtu on route is locked - use it
1401 * 2. mtu from nexthop exception
1402 * 3. mtu from egress device
1403 */
1404
1405u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1406{
1407	struct fib_nh_common *nhc = res->nhc;
1408	struct net_device *dev = nhc->nhc_dev;
1409	struct fib_info *fi = res->fi;
1410	u32 mtu = 0;
1411
1412	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
1413	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1414		mtu = fi->fib_mtu;
1415
1416	if (likely(!mtu)) {
1417		struct fib_nh_exception *fnhe;
1418
1419		fnhe = find_exception(nhc, daddr);
1420		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1421			mtu = fnhe->fnhe_pmtu;
1422	}
1423
1424	if (likely(!mtu))
1425		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1426
1427	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1428}
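/* Editor's illustrative sketch — not kernel code.  The three-step MTU
 * selection documented above, as a plain function over hypothetical
 * stand-ins for the fib_result fields: (1) an MTU locked on the route
 * wins, (2) otherwise an unexpired nexthop exception (learned via
 * PMTUD), (3) otherwise the egress device MTU.
 */
#include <stdint.h>

static uint32_t pick_mtu(uint32_t route_mtu, int route_mtu_locked,
			 uint32_t fnhe_mtu, int fnhe_unexpired,
			 uint32_t dev_mtu)
{
	uint32_t mtu = 0;

	if (route_mtu_locked)		/* 1. locked on the route: use it */
		mtu = route_mtu;
	if (!mtu && fnhe_unexpired)	/* 2. nexthop exception */
		mtu = fnhe_mtu;
	if (!mtu)			/* 3. fall back to the device */
		mtu = dev_mtu;
	return mtu;
}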
1429
1430static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1431			      __be32 daddr, const bool do_cache)
1432{
1433	bool ret = false;
1434
1435	spin_lock_bh(&fnhe_lock);
1436
1437	if (daddr == fnhe->fnhe_daddr) {
1438		struct rtable __rcu **porig;
1439		struct rtable *orig;
1440		int genid = fnhe_genid(dev_net(rt->dst.dev));
1441
1442		if (rt_is_input_route(rt))
1443			porig = &fnhe->fnhe_rth_input;
1444		else
1445			porig = &fnhe->fnhe_rth_output;
1446		orig = rcu_dereference(*porig);
1447
1448		if (fnhe->fnhe_genid != genid) {
1449			fnhe->fnhe_genid = genid;
1450			fnhe->fnhe_gw = 0;
1451			fnhe->fnhe_pmtu = 0;
1452			fnhe->fnhe_expires = 0;
1453			fnhe->fnhe_mtu_locked = false;
1454			fnhe_flush_routes(fnhe);
1455			orig = NULL;
1456		}
1457		fill_route_from_fnhe(rt, fnhe);
1458		if (!rt->rt_gw4) {
1459			rt->rt_gw4 = daddr;
1460			rt->rt_gw_family = AF_INET;
1461		}
1462
1463		if (do_cache) {
1464			dst_hold(&rt->dst);
1465			rcu_assign_pointer(*porig, rt);
1466			if (orig) {
1467				dst_dev_put(&orig->dst);
1468				dst_release(&orig->dst);
1469			}
1470			ret = true;
1471		}
1472
1473		fnhe->fnhe_stamp = jiffies;
1474	}
1475	spin_unlock_bh(&fnhe_lock);
1476
1477	return ret;
1478}
1479
1480static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1481{
1482	struct rtable *orig, *prev, **p;
1483	bool ret = true;
1484
1485	if (rt_is_input_route(rt)) {
1486		p = (struct rtable **)&nhc->nhc_rth_input;
1487	} else {
1488		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1489	}
1490	orig = *p;
1491
1492	/* hold dst before doing cmpxchg() to avoid race condition
1493	 * on this dst
1494	 */
1495	dst_hold(&rt->dst);
1496	prev = cmpxchg(p, orig, rt);
1497	if (prev == orig) {
1498		if (orig) {
1499			rt_add_uncached_list(orig);
1500			dst_release(&orig->dst);
1501		}
1502	} else {
1503		dst_release(&rt->dst);
1504		ret = false;
1505	}
1506
1507	return ret;
1508}
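/* Editor's illustrative sketch — not kernel code.  The publish pattern
 * used by rt_cache_route() above, in C11 atomics over a hypothetical
 * refcounted object: the new entry is referenced *before* the
 * compare-exchange, so no reader can observe a published pointer whose
 * reference was not yet taken; on a lost race the extra reference is
 * simply dropped again.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcnt;
};

static void obj_hold(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }
static void obj_put(struct obj *o)  { atomic_fetch_sub(&o->refcnt, 1); }

static bool cache_publish(_Atomic(struct obj *) *slot, struct obj *new)
{
	struct obj *old = atomic_load(slot);

	obj_hold(new);				/* hold before cmpxchg */
	if (atomic_compare_exchange_strong(slot, &old, new)) {
		if (old)
			obj_put(old);		/* drop the cache's old ref */
		return true;
	}
	obj_put(new);				/* lost the race: undo */
	return false;
}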
1509
1510struct uncached_list {
1511	spinlock_t		lock;
1512	struct list_head	head;
1513};
1514
1515static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1516
1517void rt_add_uncached_list(struct rtable *rt)
1518{
1519	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1520
1521	rt->rt_uncached_list = ul;
1522
1523	spin_lock_bh(&ul->lock);
1524	list_add_tail(&rt->rt_uncached, &ul->head);
1525	spin_unlock_bh(&ul->lock);
1526}
1527
1528void rt_del_uncached_list(struct rtable *rt)
1529{
1530	if (!list_empty(&rt->rt_uncached)) {
1531		struct uncached_list *ul = rt->rt_uncached_list;
1532
1533		spin_lock_bh(&ul->lock);
1534		list_del(&rt->rt_uncached);
1535		spin_unlock_bh(&ul->lock);
1536	}
1537}
1538
1539static void ipv4_dst_destroy(struct dst_entry *dst)
1540{
1541	struct rtable *rt = (struct rtable *)dst;
1542
1543	ip_dst_metrics_put(dst);
1544	rt_del_uncached_list(rt);
1545}
1546
1547void rt_flush_dev(struct net_device *dev)
1548{
1549	struct rtable *rt;
1550	int cpu;
1551
1552	for_each_possible_cpu(cpu) {
1553		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1554
1555		spin_lock_bh(&ul->lock);
1556		list_for_each_entry(rt, &ul->head, rt_uncached) {
1557			if (rt->dst.dev != dev)
1558				continue;
1559			rt->dst.dev = blackhole_netdev;
1560			dev_hold(rt->dst.dev);
1561			dev_put(dev);
1562		}
1563		spin_unlock_bh(&ul->lock);
1564	}
1565}
1566
1567static bool rt_cache_valid(const struct rtable *rt)
1568{
1569	return	rt &&
1570		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1571		!rt_is_expired(rt);
1572}
1573
1574static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1575			   const struct fib_result *res,
1576			   struct fib_nh_exception *fnhe,
1577			   struct fib_info *fi, u16 type, u32 itag,
1578			   const bool do_cache)
1579{
1580	bool cached = false;
1581
1582	if (fi) {
1583		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1584
1585		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1586			rt->rt_uses_gateway = 1;
1587			rt->rt_gw_family = nhc->nhc_gw_family;
1588			/* only INET and INET6 are supported */
1589			if (likely(nhc->nhc_gw_family == AF_INET))
1590				rt->rt_gw4 = nhc->nhc_gw.ipv4;
1591			else
1592				rt->rt_gw6 = nhc->nhc_gw.ipv6;
1593		}
1594
1595		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1596
1597#ifdef CONFIG_IP_ROUTE_CLASSID
1598		if (nhc->nhc_family == AF_INET) {
1599			struct fib_nh *nh;
1600
1601			nh = container_of(nhc, struct fib_nh, nh_common);
1602			rt->dst.tclassid = nh->nh_tclassid;
1603		}
1604#endif
1605		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1606		if (unlikely(fnhe))
1607			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1608		else if (do_cache)
1609			cached = rt_cache_route(nhc, rt);
1610		if (unlikely(!cached)) {
1611			/* Routes we intend to cache in nexthop exception or
1612			 * FIB nexthop have the DST_NOCACHE bit clear.
1613			 * However, if we are unsuccessful at storing this
1614			 * route into the cache we really need to set it.
1615			 */
1616			if (!rt->rt_gw4) {
1617				rt->rt_gw_family = AF_INET;
1618				rt->rt_gw4 = daddr;
1619			}
1620			rt_add_uncached_list(rt);
1621		}
1622	} else
1623		rt_add_uncached_list(rt);
1624
1625#ifdef CONFIG_IP_ROUTE_CLASSID
1626#ifdef CONFIG_IP_MULTIPLE_TABLES
1627	set_class_tag(rt, res->tclassid);
1628#endif
1629	set_class_tag(rt, itag);
1630#endif
1631}
1632
1633struct rtable *rt_dst_alloc(struct net_device *dev,
1634			    unsigned int flags, u16 type,
1635			    bool nopolicy, bool noxfrm)
1636{
1637	struct rtable *rt;
1638
1639	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1640		       (nopolicy ? DST_NOPOLICY : 0) |
1641		       (noxfrm ? DST_NOXFRM : 0));
1642
1643	if (rt) {
1644		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1645		rt->rt_flags = flags;
1646		rt->rt_type = type;
1647		rt->rt_is_input = 0;
1648		rt->rt_iif = 0;
1649		rt->rt_pmtu = 0;
1650		rt->rt_mtu_locked = 0;
1651		rt->rt_uses_gateway = 0;
1652		rt->rt_gw_family = 0;
1653		rt->rt_gw4 = 0;
1654		INIT_LIST_HEAD(&rt->rt_uncached);
1655
1656		rt->dst.output = ip_output;
1657		if (flags & RTCF_LOCAL)
1658			rt->dst.input = ip_local_deliver;
1659	}
1660
1661	return rt;
1662}
1663EXPORT_SYMBOL(rt_dst_alloc);
1664
1665struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1666{
1667	struct rtable *new_rt;
1668
1669	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1670			   rt->dst.flags);
1671
1672	if (new_rt) {
1673		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1674		new_rt->rt_flags = rt->rt_flags;
1675		new_rt->rt_type = rt->rt_type;
1676		new_rt->rt_is_input = rt->rt_is_input;
1677		new_rt->rt_iif = rt->rt_iif;
1678		new_rt->rt_pmtu = rt->rt_pmtu;
1679		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1680		new_rt->rt_gw_family = rt->rt_gw_family;
1681		if (rt->rt_gw_family == AF_INET)
1682			new_rt->rt_gw4 = rt->rt_gw4;
1683		else if (rt->rt_gw_family == AF_INET6)
1684			new_rt->rt_gw6 = rt->rt_gw6;
1685		INIT_LIST_HEAD(&new_rt->rt_uncached);
1686
1687		new_rt->dst.input = rt->dst.input;
1688		new_rt->dst.output = rt->dst.output;
1689		new_rt->dst.error = rt->dst.error;
1690		new_rt->dst.lastuse = jiffies;
1691		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1692	}
1693	return new_rt;
1694}
1695EXPORT_SYMBOL(rt_dst_clone);
1696
1697/* called in rcu_read_lock() section */
1698int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1699			  u8 tos, struct net_device *dev,
1700			  struct in_device *in_dev, u32 *itag)
1701{
1702	int err;
1703
1704	/* Primary sanity checks. */
1705	if (!in_dev)
1706		return -EINVAL;
1707
1708	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1709	    skb->protocol != htons(ETH_P_IP))
1710		return -EINVAL;
1711
1712	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1713		return -EINVAL;
1714
1715	if (ipv4_is_zeronet(saddr)) {
1716		if (!ipv4_is_local_multicast(daddr) &&
1717		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
1718			return -EINVAL;
1719	} else {
1720		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1721					  in_dev, itag);
1722		if (err < 0)
1723			return err;
1724	}
1725	return 0;
1726}
1727
1728/* called in rcu_read_lock() section */
1729static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1730			     u8 tos, struct net_device *dev, int our)
1731{
1732	struct in_device *in_dev = __in_dev_get_rcu(dev);
1733	unsigned int flags = RTCF_MULTICAST;
1734	struct rtable *rth;
1735	u32 itag = 0;
1736	int err;
1737
1738	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1739	if (err)
1740		return err;
1741
1742	if (our)
1743		flags |= RTCF_LOCAL;
1744
1745	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1746			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
1747	if (!rth)
1748		return -ENOBUFS;
1749
1750#ifdef CONFIG_IP_ROUTE_CLASSID
1751	rth->dst.tclassid = itag;
1752#endif
1753	rth->dst.output = ip_rt_bug;
 1754	rth->rt_is_input = 1;
1755
1756#ifdef CONFIG_IP_MROUTE
1757	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1758		rth->dst.input = ip_mr_input;
1759#endif
1760	RT_CACHE_STAT_INC(in_slow_mc);
1761
1762	skb_dst_set(skb, &rth->dst);
1763	return 0;
1764}
1765
1766
1767static void ip_handle_martian_source(struct net_device *dev,
1768				     struct in_device *in_dev,
1769				     struct sk_buff *skb,
1770				     __be32 daddr,
1771				     __be32 saddr)
1772{
1773	RT_CACHE_STAT_INC(in_martian_src);
1774#ifdef CONFIG_IP_ROUTE_VERBOSE
1775	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1776		/*
 1777		 *	RFC1812 recommendation: if the source is martian,
 1778		 *	the only hint is the MAC header.
1779		 */
1780		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1781			&daddr, &saddr, dev->name);
1782		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1783			print_hex_dump(KERN_WARNING, "ll header: ",
1784				       DUMP_PREFIX_OFFSET, 16, 1,
1785				       skb_mac_header(skb),
1786				       dev->hard_header_len, false);
1787		}
1788	}
1789#endif
1790}
1791
1792/* called in rcu_read_lock() section */
1793static int __mkroute_input(struct sk_buff *skb,
1794			   const struct fib_result *res,
1795			   struct in_device *in_dev,
1796			   __be32 daddr, __be32 saddr, u32 tos)
1797{
1798	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1799	struct net_device *dev = nhc->nhc_dev;
1800	struct fib_nh_exception *fnhe;
1801	struct rtable *rth;
1802	int err;
1803	struct in_device *out_dev;
1804	bool do_cache;
1805	u32 itag = 0;
1806
1807	/* get a working reference to the output device */
1808	out_dev = __in_dev_get_rcu(dev);
1809	if (!out_dev) {
1810		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1811		return -EINVAL;
1812	}
1813
1814	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1815				  in_dev->dev, in_dev, &itag);
1816	if (err < 0) {
1817		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1818					 saddr);
1819
1820		goto cleanup;
1821	}
1822
1823	do_cache = res->fi && !itag;
1824	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1825	    skb->protocol == htons(ETH_P_IP)) {
1826		__be32 gw;
1827
1828		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1829		if (IN_DEV_SHARED_MEDIA(out_dev) ||
1830		    inet_addr_onlink(out_dev, saddr, gw))
1831			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1832	}
1833
1834	if (skb->protocol != htons(ETH_P_IP)) {
 1835		/* Not IP (i.e. ARP). Do not create a route if it is
 1836		 * invalid for proxy arp. DNAT routes are always valid.
 1837		 *
 1838		 * The proxy arp feature has been extended to allow ARP
 1839		 * replies back on the same interface, to support
1840		 * Private VLAN switch technologies. See arp.c.
1841		 */
1842		if (out_dev == in_dev &&
1843		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1844			err = -EINVAL;
1845			goto cleanup;
1846		}
1847	}
1848
1849	fnhe = find_exception(nhc, daddr);
1850	if (do_cache) {
1851		if (fnhe)
1852			rth = rcu_dereference(fnhe->fnhe_rth_input);
1853		else
1854			rth = rcu_dereference(nhc->nhc_rth_input);
1855		if (rt_cache_valid(rth)) {
1856			skb_dst_set_noref(skb, &rth->dst);
1857			goto out;
1858		}
1859	}
1860
1861	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1862			   IN_DEV_ORCONF(in_dev, NOPOLICY),
1863			   IN_DEV_ORCONF(out_dev, NOXFRM));
1864	if (!rth) {
1865		err = -ENOBUFS;
1866		goto cleanup;
1867	}
1868
1869	rth->rt_is_input = 1;
1870	RT_CACHE_STAT_INC(in_slow_tot);
1871
1872	rth->dst.input = ip_forward;
1873
1874	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1875		       do_cache);
1876	lwtunnel_set_redirect(&rth->dst);
1877	skb_dst_set(skb, &rth->dst);
1878out:
1879	err = 0;
1880 cleanup:
1881	return err;
1882}
1883
1884#ifdef CONFIG_IP_ROUTE_MULTIPATH
1885/* To make ICMP packets follow the right flow, the multipath hash is
1886 * calculated from the inner IP addresses.
1887 */
1888static void ip_multipath_l3_keys(const struct sk_buff *skb,
1889				 struct flow_keys *hash_keys)
1890{
1891	const struct iphdr *outer_iph = ip_hdr(skb);
1892	const struct iphdr *key_iph = outer_iph;
1893	const struct iphdr *inner_iph;
1894	const struct icmphdr *icmph;
1895	struct iphdr _inner_iph;
1896	struct icmphdr _icmph;
1897
1898	if (likely(outer_iph->protocol != IPPROTO_ICMP))
1899		goto out;
1900
1901	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1902		goto out;
1903
1904	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1905				   &_icmph);
1906	if (!icmph)
1907		goto out;
1908
1909	if (!icmp_is_err(icmph->type))
1910		goto out;
1911
1912	inner_iph = skb_header_pointer(skb,
1913				       outer_iph->ihl * 4 + sizeof(_icmph),
1914				       sizeof(_inner_iph), &_inner_iph);
1915	if (!inner_iph)
1916		goto out;
1917
1918	key_iph = inner_iph;
1919out:
1920	hash_keys->addrs.v4addrs.src = key_iph->saddr;
1921	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1922}
1923
1924static u32 fib_multipath_custom_hash_outer(const struct net *net,
1925					   const struct sk_buff *skb,
1926					   bool *p_has_inner)
1927{
1928	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
1929	struct flow_keys keys, hash_keys;
1930
1931	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1932		return 0;
1933
1934	memset(&hash_keys, 0, sizeof(hash_keys));
1935	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
1936
1937	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1938	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1939		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1940	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1941		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1942	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1943		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1944	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1945		hash_keys.ports.src = keys.ports.src;
1946	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
1947		hash_keys.ports.dst = keys.ports.dst;
1948
1949	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
1950	return flow_hash_from_keys(&hash_keys);
1951}
1952
1953static u32 fib_multipath_custom_hash_inner(const struct net *net,
1954					   const struct sk_buff *skb,
1955					   bool has_inner)
1956{
1957	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
1958	struct flow_keys keys, hash_keys;
1959
1960	/* We assume the packet carries an encapsulation, but if none was
1961	 * encountered during dissection of the outer flow, then there is no
1962	 * point in calling the flow dissector again.
1963	 */
1964	if (!has_inner)
1965		return 0;
1966
1967	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
1968		return 0;
1969
1970	memset(&hash_keys, 0, sizeof(hash_keys));
1971	skb_flow_dissect_flow_keys(skb, &keys, 0);
1972
1973	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
1974		return 0;
1975
1976	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1977		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1978		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1979			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1980		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1981			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1982	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1983		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1984		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1985			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1986		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1987			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1988		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
1989			hash_keys.tags.flow_label = keys.tags.flow_label;
1990	}
1991
1992	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
1993		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1994	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
1995		hash_keys.ports.src = keys.ports.src;
1996	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
1997		hash_keys.ports.dst = keys.ports.dst;
1998
1999	return flow_hash_from_keys(&hash_keys);
2000}
2001
2002static u32 fib_multipath_custom_hash_skb(const struct net *net,
2003					 const struct sk_buff *skb)
2004{
2005	u32 mhash, mhash_inner;
2006	bool has_inner = true;
2007
2008	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
2009	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
2010
2011	return jhash_2words(mhash, mhash_inner, 0);
2012}
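/* The outer and inner hashes are folded with jhash_2words(), so a packet
 * without an encapsulation (inner hash 0) still yields a stable value
 * derived from the outer fields alone.
 */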
2013
2014static u32 fib_multipath_custom_hash_fl4(const struct net *net,
2015					 const struct flowi4 *fl4)
2016{
2017	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
2018	struct flow_keys hash_keys;
2019
2020	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2021		return 0;
2022
2023	memset(&hash_keys, 0, sizeof(hash_keys));
2024	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2025	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2026		hash_keys.addrs.v4addrs.src = fl4->saddr;
2027	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2028		hash_keys.addrs.v4addrs.dst = fl4->daddr;
2029	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2030		hash_keys.basic.ip_proto = fl4->flowi4_proto;
2031	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2032		hash_keys.ports.src = fl4->fl4_sport;
2033	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2034		hash_keys.ports.dst = fl4->fl4_dport;
2035
2036	return flow_hash_from_keys(&hash_keys);
2037}
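/* Flowi4 variant of the custom hash, used when no skb is available (e.g.
 * locally generated traffic): only outer fields exist here, so the inner
 * field selectors cannot contribute by construction.
 */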
2038
2039	/* If skb is set, it will be used and fl4 can be NULL. */
2040int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2041		       const struct sk_buff *skb, struct flow_keys *flkeys)
2042{
2043	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2044	struct flow_keys hash_keys;
2045	u32 mhash = 0;
2046
2047	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
2048	case 0:
2049		memset(&hash_keys, 0, sizeof(hash_keys));
2050		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2051		if (skb) {
2052			ip_multipath_l3_keys(skb, &hash_keys);
2053		} else {
2054			hash_keys.addrs.v4addrs.src = fl4->saddr;
2055			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2056		}
2057		mhash = flow_hash_from_keys(&hash_keys);
2058		break;
2059	case 1:
2060		/* skb is currently provided only when forwarding */
2061		if (skb) {
2062			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2063			struct flow_keys keys;
2064
2065			/* short-circuit if we already have L4 hash present */
2066			if (skb->l4_hash)
2067				return skb_get_hash_raw(skb) >> 1;
2068
2069			memset(&hash_keys, 0, sizeof(hash_keys));
2070
2071			if (!flkeys) {
2072				skb_flow_dissect_flow_keys(skb, &keys, flag);
2073				flkeys = &keys;
2074			}
2075
2076			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2077			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2078			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2079			hash_keys.ports.src = flkeys->ports.src;
2080			hash_keys.ports.dst = flkeys->ports.dst;
2081			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2082		} else {
2083			memset(&hash_keys, 0, sizeof(hash_keys));
2084			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2085			hash_keys.addrs.v4addrs.src = fl4->saddr;
2086			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2087			hash_keys.ports.src = fl4->fl4_sport;
2088			hash_keys.ports.dst = fl4->fl4_dport;
2089			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2090		}
2091		mhash = flow_hash_from_keys(&hash_keys);
2092		break;
2093	case 2:
2094		memset(&hash_keys, 0, sizeof(hash_keys));
2095		/* skb is currently provided only when forwarding */
2096		if (skb) {
2097			struct flow_keys keys;
2098
2099			skb_flow_dissect_flow_keys(skb, &keys, 0);
2100			/* Inner can be v4 or v6 */
2101			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2102				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2103				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2104				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2105			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2106				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2107				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2108				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2109				hash_keys.tags.flow_label = keys.tags.flow_label;
2110				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2111			} else {
2112				/* Same as case 0 */
2113				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2114				ip_multipath_l3_keys(skb, &hash_keys);
2115			}
2116		} else {
2117			/* Same as case 0 */
2118			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2119			hash_keys.addrs.v4addrs.src = fl4->saddr;
2120			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2121		}
2122		mhash = flow_hash_from_keys(&hash_keys);
2123		break;
2124	case 3:
2125		if (skb)
2126			mhash = fib_multipath_custom_hash_skb(net, skb);
2127		else
2128			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2129		break;
2130	}
2131
2132	if (multipath_hash)
2133		mhash = jhash_2words(mhash, multipath_hash, 0);
2134
2135	return mhash >> 1;
2136}
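/* Summary of the fib_multipath_hash_policy values handled above:
 *   0 - L3 (saddr/daddr; ICMP errors keyed on the inner header)
 *   1 - L4 five-tuple (addresses, ports, protocol)
 *   2 - inner-header L3 when an encapsulation is present
 *   3 - custom field set chosen via fib_multipath_hash_fields
 * The final mhash >> 1 keeps the result non-negative so it can safely be
 * compared as a signed value during nexthop selection.
 */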
2137#endif /* CONFIG_IP_ROUTE_MULTIPATH */
2138
2139static int ip_mkroute_input(struct sk_buff *skb,
2140			    struct fib_result *res,
2141			    struct in_device *in_dev,
2142			    __be32 daddr, __be32 saddr, u32 tos,
2143			    struct flow_keys *hkeys)
2144{
2145#ifdef CONFIG_IP_ROUTE_MULTIPATH
2146	if (res->fi && fib_info_num_path(res->fi) > 1) {
2147		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2148
2149		fib_select_multipath(res, h);
2150	}
2151#endif
2152
2153	/* create a routing cache entry */
2154	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2155}
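/* On input, the multipath nexthop is selected before the cache entry is
 * built, so each flow's hash pins it to a single nexthop of the group.
 */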
2156
2157/* Implements all the saddr-related checks as ip_route_input_slow(),
2158 * assuming daddr is valid and the destination is not a local broadcast one.
2159 * Uses the provided hint instead of performing a route lookup.
2160 */
2161int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2162		      u8 tos, struct net_device *dev,
2163		      const struct sk_buff *hint)
2164	{
2165	struct in_device *in_dev = __in_dev_get_rcu(dev);
2166	struct rtable *rt = skb_rtable(hint);
2167	struct net *net = dev_net(dev);
2168	int err = -EINVAL;
2169	u32 tag = 0;
2170
2171	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2172		goto martian_source;
2173
2174	if (ipv4_is_zeronet(saddr))
2175		goto martian_source;
2176
2177	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2178		goto martian_source;
2179
2180	if (rt->rt_type != RTN_LOCAL)
2181		goto skip_validate_source;
2182
2183	tos &= IPTOS_RT_MASK;
2184	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
2185	if (err < 0)
2186		goto martian_source;
2187
2188skip_validate_source:
2189	skb_dst_copy(skb, hint);
2190	return 0;
2191
2192martian_source:
2193	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2194	return err;
2195}
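/* The hint path re-runs only the saddr sanity checks; the destination
 * side is trusted because the hint skb was already routed to the same
 * daddr on this device, and its dst is simply copied onto the new skb.
 */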
2196
2197/* get device for dst_alloc with local routes */
2198static struct net_device *ip_rt_get_dev(struct net *net,
2199					const struct fib_result *res)
2200{
2201	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2202	struct net_device *dev = NULL;
2203
2204	if (nhc)
2205		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2206
2207	return dev ? : net->loopback_dev;
2208}
2209
2210	/*
2211	 *	NOTE. We drop all packets that have a local source
2212	 *	address, because every properly looped-back packet
2213	 *	must have the correct destination already attached by the output routine.
2214	 *	Changes in the enforced policies must also be applied to
2215	 *	ip_route_use_hint().
2216	 *
2217	 *	This approach solves two big problems:
2218	 *	1. Non-simplex devices are handled properly.
2219	 *	2. IP spoofing attempts are filtered with a 100% guarantee.
2220	 *	Called with rcu_read_lock().
2221	 */
2222
2223static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2224			       u8 tos, struct net_device *dev,
2225			       struct fib_result *res)
2226	{
2227	struct in_device *in_dev = __in_dev_get_rcu(dev);
2228	struct flow_keys *flkeys = NULL, _flkeys;
2229	struct net    *net = dev_net(dev);
2230	struct ip_tunnel_info *tun_info;
2231	int		err = -EINVAL;
2232	unsigned int	flags = 0;
2233	u32		itag = 0;
2234	struct rtable	*rth;
2235	struct flowi4	fl4;
2236	bool do_cache = true;
2237
2238	/* IP on this device is disabled. */
2239
2240	if (!in_dev)
2241		goto out;
2242
2243	/* Check for the weirdest martians, which cannot be detected
2244	 * by fib_lookup().
2245	 */
2246
2247	tun_info = skb_tunnel_info(skb);
2248	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2249		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2250	else
2251		fl4.flowi4_tun_key.tun_id = 0;
2252	skb_dst_drop(skb);
2253
2254	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2255		goto martian_source;
2256
2257	res->fi = NULL;
2258	res->table = NULL;
2259	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2260		goto brd_input;
2261
2262	/* Accept zero addresses only for limited broadcast;
2263	 * I do not even know whether to fix it or not. Waiting for complaints :-)
2264	 */
2265	if (ipv4_is_zeronet(saddr))
2266		goto martian_source;
2267
2268	if (ipv4_is_zeronet(daddr))
2269		goto martian_destination;
2270
2271	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
2272	 * more than once when daddr and/or saddr is a loopback address.
2273	 */
2274	if (ipv4_is_loopback(daddr)) {
2275		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2276			goto martian_destination;
2277	} else if (ipv4_is_loopback(saddr)) {
2278		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2279			goto martian_source;
2280	}
2281
2282	/*
2283	 *	Now we are ready to route the packet.
2284	 */
2285	fl4.flowi4_oif = 0;
2286	fl4.flowi4_iif = dev->ifindex;
2287	fl4.flowi4_mark = skb->mark;
2288	fl4.flowi4_tos = tos;
2289	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2290	fl4.flowi4_flags = 0;
2291	fl4.daddr = daddr;
2292	fl4.saddr = saddr;
2293	fl4.flowi4_uid = sock_net_uid(net, NULL);
2294	fl4.flowi4_multipath_hash = 0;
2295
2296	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2297		flkeys = &_flkeys;
2298	} else {
2299		fl4.flowi4_proto = 0;
2300		fl4.fl4_sport = 0;
2301		fl4.fl4_dport = 0;
2302	}
2303
2304	err = fib_lookup(net, &fl4, res, 0);
2305	if (err != 0) {
2306		if (!IN_DEV_FORWARD(in_dev))
2307			err = -EHOSTUNREACH;
2308		goto no_route;
2309	}
2310
2311	if (res->type == RTN_BROADCAST) {
2312		if (IN_DEV_BFORWARD(in_dev))
2313			goto make_route;
2314		/* do not cache if bc_forwarding is enabled */
2315		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2316			do_cache = false;
2317		goto brd_input;
2318	}
2319
2320	if (res->type == RTN_LOCAL) {
2321		err = fib_validate_source(skb, saddr, daddr, tos,
2322					  0, dev, in_dev, &itag);
2323		if (err < 0)
2324			goto martian_source;
2325		goto local_input;
2326	}
2327
2328	if (!IN_DEV_FORWARD(in_dev)) {
2329		err = -EHOSTUNREACH;
2330		goto no_route;
2331	}
2332	if (res->type != RTN_UNICAST)
2333		goto martian_destination;
2334
2335make_route:
2336	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2337	out:	return err;
2338
2339brd_input:
2340	if (skb->protocol != htons(ETH_P_IP))
2341		goto e_inval;
2342
2343	if (!ipv4_is_zeronet(saddr)) {
2344		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2345					  in_dev, &itag);
2346		if (err < 0)
2347			goto martian_source;
2348	}
2349	flags |= RTCF_BROADCAST;
2350	res->type = RTN_BROADCAST;
2351	RT_CACHE_STAT_INC(in_brd);
2352
2353	local_input:
2354	do_cache &= res->fi && !itag;
2355	if (do_cache) {
2356		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2357
2358		rth = rcu_dereference(nhc->nhc_rth_input);
2359		if (rt_cache_valid(rth)) {
2360			skb_dst_set_noref(skb, &rth->dst);
2361			err = 0;
2362			goto out;
2363		}
2364	}
2365
2366	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2367			   flags | RTCF_LOCAL, res->type,
2368			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
2369	if (!rth)
2370		goto e_nobufs;
2371
2372	rth->dst.output = ip_rt_bug;
2373#ifdef CONFIG_IP_ROUTE_CLASSID
2374	rth->dst.tclassid = itag;
2375#endif
2376	rth->rt_is_input = 1;
2377
2378	RT_CACHE_STAT_INC(in_slow_tot);
2379	if (res->type == RTN_UNREACHABLE) {
2380		rth->dst.input = ip_error;
2381		rth->dst.error = -err;
2382		rth->rt_flags	&= ~RTCF_LOCAL;
2383	}
2384
2385	if (do_cache) {
2386		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2387
2388		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2389		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2390			WARN_ON(rth->dst.input == lwtunnel_input);
2391			rth->dst.lwtstate->orig_input = rth->dst.input;
2392			rth->dst.input = lwtunnel_input;
2393		}
2394
2395		if (unlikely(!rt_cache_route(nhc, rth)))
2396			rt_add_uncached_list(rth);
2397	}
2398	skb_dst_set(skb, &rth->dst);
2399	err = 0;
2400	goto out;
2401
2402no_route:
2403	RT_CACHE_STAT_INC(in_no_route);
2404	res->type = RTN_UNREACHABLE;
2405	res->fi = NULL;
2406	res->table = NULL;
2407	goto local_input;
2408
2409	/*
2410	 *	Do not cache martian addresses: they should be logged (RFC1812)
2411	 */
2412martian_destination:
2413	RT_CACHE_STAT_INC(in_martian_dst);
2414#ifdef CONFIG_IP_ROUTE_VERBOSE
2415	if (IN_DEV_LOG_MARTIANS(in_dev))
2416		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2417				     &daddr, &saddr, dev->name);
2418#endif
2419
2420e_inval:
2421	err = -EINVAL;
2422	goto out;
2423
2424e_nobufs:
2425	err = -ENOBUFS;
2426	goto out;
2427
2428martian_source:
2429	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2430	goto out;
2431}
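/* Input slow path in short: reject martian saddr/daddr combinations, do
 * the FIB lookup, then dispatch - broadcasts to brd_input, local traffic
 * to local_input, everything else through ip_mkroute_input().  Lookup
 * failures become RTN_UNREACHABLE dsts, so the error is generated when
 * the packet is actually consumed rather than being dropped silently.
 */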
2432
2433int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2434			 u8 tos, struct net_device *dev)
2435{
2436	struct fib_result res;
2437	int err;
2438
2439	tos &= IPTOS_RT_MASK;
2440	rcu_read_lock();
2441	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2442	rcu_read_unlock();
2443
2444	return err;
2445}
2446EXPORT_SYMBOL(ip_route_input_noref);
2447
2448/* called with rcu_read_lock held */
2449int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2450		       u8 tos, struct net_device *dev, struct fib_result *res)
2451{
2452	/* Multicast recognition logic was moved from the route cache to here.
2453	 * The problem was that too many Ethernet cards have broken/missing
2454	 * hardware multicast filters :-( As a result, a host on a multicast
2455	 * network acquires a lot of useless route cache entries, e.g. for
2456	 * SDR messages from all over the world. Now we try to get rid of them.
2457	 * Really, provided the software IP multicast filter is organized
2458	 * reasonably (at least, hashed), it does not result in a slowdown
2459	 * compared with route cache reject entries.
2460	 * Note that multicast routers are not affected, because
2461	 * a route cache entry is created eventually.
2462	 */
2463	if (ipv4_is_multicast(daddr)) {
2464		struct in_device *in_dev = __in_dev_get_rcu(dev);
2465		int our = 0;
2466		int err = -EINVAL;
2467
2468		if (!in_dev)
2469			return err;
2470		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2471				      ip_hdr(skb)->protocol);
2472
2473		/* check l3 master if no match yet */
2474		if (!our && netif_is_l3_slave(dev)) {
2475			struct in_device *l3_in_dev;
2476
2477			l3_in_dev = __in_dev_get_rcu(skb->dev);
2478			if (l3_in_dev)
2479				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2480						      ip_hdr(skb)->protocol);
2481		}
2482
2483		if (our
2484#ifdef CONFIG_IP_MROUTE
2485			||
2486		    (!ipv4_is_local_multicast(daddr) &&
2487		     IN_DEV_MFORWARD(in_dev))
2488#endif
2489		   ) {
2490			err = ip_route_input_mc(skb, daddr, saddr,
2491						tos, dev, our);
2492		}
2493		return err;
2494	}
2495
2496	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2497	}
2498
2499/* called with rcu_read_lock() */
2500static struct rtable *__mkroute_output(const struct fib_result *res,
2501				       const struct flowi4 *fl4, int orig_oif,
2502				       struct net_device *dev_out,
2503				       unsigned int flags)
2504{
2505	struct fib_info *fi = res->fi;
2506	struct fib_nh_exception *fnhe;
2507	struct in_device *in_dev;
2508	u16 type = res->type;
2509	struct rtable *rth;
2510	bool do_cache;
2511
2512	in_dev = __in_dev_get_rcu(dev_out);
2513	if (!in_dev)
2514		return ERR_PTR(-EINVAL);
2515
2516	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2517		if (ipv4_is_loopback(fl4->saddr) &&
2518		    !(dev_out->flags & IFF_LOOPBACK) &&
2519		    !netif_is_l3_master(dev_out))
2520			return ERR_PTR(-EINVAL);
2521
2522	if (ipv4_is_lbcast(fl4->daddr))
2523		type = RTN_BROADCAST;
2524	else if (ipv4_is_multicast(fl4->daddr))
2525		type = RTN_MULTICAST;
2526	else if (ipv4_is_zeronet(fl4->daddr))
2527		return ERR_PTR(-EINVAL);
2528
2529	if (dev_out->flags & IFF_LOOPBACK)
2530		flags |= RTCF_LOCAL;
2531
2532	do_cache = true;
2533	if (type == RTN_BROADCAST) {
2534		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2535		fi = NULL;
2536	} else if (type == RTN_MULTICAST) {
2537		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2538		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2539				     fl4->flowi4_proto))
2540			flags &= ~RTCF_LOCAL;
2541		else
2542			do_cache = false;
2543		/* If a multicast route does not exist, use
2544		 * the default one, but do not use the gateway in this case.
2545		 * Yes, it is a hack.
2546		 */
2547		if (fi && res->prefixlen < 4)
2548			fi = NULL;
2549	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2550		   (orig_oif != dev_out->ifindex)) {
2551		/* For local routes that require a particular output interface
2552		 * we do not want to cache the result.  Caching the result
2553		 * causes incorrect behaviour when there are multiple source
2554		 * addresses on the interface: the end result is that if the
2555		 * intended recipient is waiting on that interface for the
2556		 * packet, it won't receive it because it will be delivered on
2557		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2558		 * be set to the loopback interface as well.
2559		 */
2560		do_cache = false;
2561	}
2562
2563	fnhe = NULL;
2564	do_cache &= fi != NULL;
2565	if (fi) {
2566		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2567		struct rtable __rcu **prth;
2568
2569		fnhe = find_exception(nhc, fl4->daddr);
2570		if (!do_cache)
2571			goto add;
2572		if (fnhe) {
2573			prth = &fnhe->fnhe_rth_output;
2574		} else {
2575			if (unlikely(fl4->flowi4_flags &
2576				     FLOWI_FLAG_KNOWN_NH &&
2577				     !(nhc->nhc_gw_family &&
2578				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2579				do_cache = false;
2580				goto add;
2581			}
2582			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2583		}
2584		rth = rcu_dereference(*prth);
2585		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2586			return rth;
2587	}
2588
2589add:
2590	rth = rt_dst_alloc(dev_out, flags, type,
2591			   IN_DEV_ORCONF(in_dev, NOPOLICY),
2592			   IN_DEV_ORCONF(in_dev, NOXFRM));
2593	if (!rth)
2594		return ERR_PTR(-ENOBUFS);
2595
2596	rth->rt_iif = orig_oif;
2597
2598	RT_CACHE_STAT_INC(out_slow_tot);
2599
2600	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2601		if (flags & RTCF_LOCAL &&
2602		    !(dev_out->flags & IFF_LOOPBACK)) {
2603			rth->dst.output = ip_mc_output;
2604			RT_CACHE_STAT_INC(out_slow_mc);
2605		}
2606#ifdef CONFIG_IP_MROUTE
2607		if (type == RTN_MULTICAST) {
2608			if (IN_DEV_MFORWARD(in_dev) &&
2609			    !ipv4_is_local_multicast(fl4->daddr)) {
2610				rth->dst.input = ip_mr_input;
2611				rth->dst.output = ip_mc_output;
2612			}
2613		}
2614#endif
2615	}
2616
2617	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2618	lwtunnel_set_redirect(&rth->dst);
2619
2620	return rth;
2621}
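/* Caching policy above: reuse a valid per-nexthop (or per-exception) dst
 * when possible; skip caching for locally received multicast and for
 * RTN_LOCAL routes pinned to a particular oif, as explained inline.
 */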
2622
2623/*
2624 * Major route resolver routine.
2625 */
2626
2627struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2628					const struct sk_buff *skb)
2629{
2630	__u8 tos = RT_FL_TOS(fl4);
2631	struct fib_result res = {
2632		.type		= RTN_UNSPEC,
2633		.fi		= NULL,
2634		.table		= NULL,
2635		.tclassid	= 0,
2636	};
2637	struct rtable *rth;
2638
2639	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2640	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2641	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2642			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2643
2644	rcu_read_lock();
2645	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2646	rcu_read_unlock();
2647
2648	return rth;
2649}
2650EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2651
2652struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2653					    struct fib_result *res,
2654					    const struct sk_buff *skb)
2655{
2656	struct net_device *dev_out = NULL;
2657	int orig_oif = fl4->flowi4_oif;
2658	unsigned int flags = 0;
2659	struct rtable *rth;
2660	int err;
2661
2662	if (fl4->saddr) {
2663		if (ipv4_is_multicast(fl4->saddr) ||
2664		    ipv4_is_lbcast(fl4->saddr) ||
2665		    ipv4_is_zeronet(fl4->saddr)) {
2666			rth = ERR_PTR(-EINVAL);
2667			goto out;
2668		}
2669
2670		rth = ERR_PTR(-ENETUNREACH);
2671
2672		/* I removed check for oif == dev_out->oif here.
2673		 * It was wrong for two reasons:
2674		 * 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2675		 *    is assigned to multiple interfaces.
2676		 * 2. Moreover, we are allowed to send packets with saddr
2677		 *    of another iface. --ANK
2678		 */
2679
2680		if (fl4->flowi4_oif == 0 &&
2681		    (ipv4_is_multicast(fl4->daddr) ||
2682		     ipv4_is_lbcast(fl4->daddr))) {
2683			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2684			dev_out = __ip_dev_find(net, fl4->saddr, false);
2685			if (!dev_out)
2686				goto out;
2687
2688			/* Special hack: the user can direct multicasts
2689			 * and limited broadcast via the necessary interface
2690			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2691			 * This hack is not just for fun, it allows
2692			 * vic, vat and friends to work.
2693			 * They bind a socket to loopback, set ttl to zero
2694			 * and expect that it will work.
2695			 * From the viewpoint of the routing cache they are broken,
2696			 * because we are not allowed to build a multicast path
2697			 * with a loopback source addr (the routing cache
2698			 * cannot know that ttl is zero, so that the packet
2699			 * will not leave this host and the route is valid).
2700			 * Luckily, this hack is a good workaround.
2701			 */
2702
2703			fl4->flowi4_oif = dev_out->ifindex;
2704			goto make_route;
2705		}
2706
2707		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2708			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2709			if (!__ip_dev_find(net, fl4->saddr, false))
2710				goto out;
2711		}
2712	}
2713
2714
2715	if (fl4->flowi4_oif) {
2716		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2717		rth = ERR_PTR(-ENODEV);
2718		if (!dev_out)
2719			goto out;
2720
2721		/* RACE: Check return value of inet_select_addr instead. */
2722		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2723			rth = ERR_PTR(-ENETUNREACH);
2724			goto out;
2725		}
2726		if (ipv4_is_local_multicast(fl4->daddr) ||
2727		    ipv4_is_lbcast(fl4->daddr) ||
2728		    fl4->flowi4_proto == IPPROTO_IGMP) {
2729			if (!fl4->saddr)
2730				fl4->saddr = inet_select_addr(dev_out, 0,
2731							      RT_SCOPE_LINK);
2732			goto make_route;
2733		}
2734		if (!fl4->saddr) {
2735			if (ipv4_is_multicast(fl4->daddr))
2736				fl4->saddr = inet_select_addr(dev_out, 0,
2737							      fl4->flowi4_scope);
2738			else if (!fl4->daddr)
2739				fl4->saddr = inet_select_addr(dev_out, 0,
2740							      RT_SCOPE_HOST);
2741		}
2742	}
2743
2744	if (!fl4->daddr) {
2745		fl4->daddr = fl4->saddr;
2746		if (!fl4->daddr)
2747			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2748		dev_out = net->loopback_dev;
2749		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2750		res->type = RTN_LOCAL;
2751		flags |= RTCF_LOCAL;
2752		goto make_route;
2753	}
2754
2755	err = fib_lookup(net, fl4, res, 0);
2756	if (err) {
2757		res->fi = NULL;
2758		res->table = NULL;
2759		if (fl4->flowi4_oif &&
2760		    (ipv4_is_multicast(fl4->daddr) ||
2761		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2762			/* Apparently, the routing tables are wrong. Assume
2763			 * that the destination is on-link.
2764			 *
2765			 * WHY? DW.
2766			 * Because we are allowed to send to an iface
2767			 * even if it has NO routes and NO assigned
2768			 * addresses. When oif is specified, the routing
2769			 * tables are looked up with only one purpose:
2770			 * to catch whether the destination is gatewayed
2771			 * rather than direct. Moreover, if MSG_DONTROUTE is set,
2772			 * we send the packet, ignoring both routing tables
2773			 * and ifaddr state. --ANK
2774			 *
2775			 *
2776			 * We could do this even when oif is unknown,
2777			 * as IPv6 likely does, but we do not.
2778			 */
2779
2780			if (fl4->saddr == 0)
2781				fl4->saddr = inet_select_addr(dev_out, 0,
2782							      RT_SCOPE_LINK);
2783			res->type = RTN_UNICAST;
2784			goto make_route;
2785		}
2786		rth = ERR_PTR(err);
2787		goto out;
2788	}
2789
2790	if (res->type == RTN_LOCAL) {
2791		if (!fl4->saddr) {
2792			if (res->fi->fib_prefsrc)
2793				fl4->saddr = res->fi->fib_prefsrc;
2794			else
2795				fl4->saddr = fl4->daddr;
2796		}
2797
2798		/* L3 master device is the loopback for that domain */
2799		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2800			net->loopback_dev;
2801
2802		/* make sure orig_oif points to fib result device even
2803		 * though packet rx/tx happens over loopback or l3mdev
2804		 */
2805		orig_oif = FIB_RES_OIF(*res);
2806
2807		fl4->flowi4_oif = dev_out->ifindex;
2808		flags |= RTCF_LOCAL;
2809		goto make_route;
2810	}
2811
2812	fib_select_path(net, res, fl4, skb);
2813
2814	dev_out = FIB_RES_DEV(*res);
2815
2816make_route:
2817	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2818
2819out:
2820	return rth;
2821}
2822
2823static struct dst_ops ipv4_dst_blackhole_ops = {
2824	.family			= AF_INET,
2825	.default_advmss		= ipv4_default_advmss,
2826	.neigh_lookup		= ipv4_neigh_lookup,
2827	.check			= dst_blackhole_check,
2828	.cow_metrics		= dst_blackhole_cow_metrics,
2829	.update_pmtu		= dst_blackhole_update_pmtu,
2830	.redirect		= dst_blackhole_redirect,
2831	.mtu			= dst_blackhole_mtu,
2832};
2833
2834struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2835{
2836	struct rtable *ort = (struct rtable *) dst_orig;
2837	struct rtable *rt;
2838
2839	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2840	if (rt) {
2841		struct dst_entry *new = &rt->dst;
2842
2843		new->__use = 1;
2844		new->input = dst_discard;
2845		new->output = dst_discard_out;
2846
2847		new->dev = net->loopback_dev;
2848		if (new->dev)
2849			dev_hold(new->dev);
2850
2851		rt->rt_is_input = ort->rt_is_input;
2852		rt->rt_iif = ort->rt_iif;
2853		rt->rt_pmtu = ort->rt_pmtu;
2854		rt->rt_mtu_locked = ort->rt_mtu_locked;
2855
2856		rt->rt_genid = rt_genid_ipv4(net);
2857		rt->rt_flags = ort->rt_flags;
2858		rt->rt_type = ort->rt_type;
2859		rt->rt_uses_gateway = ort->rt_uses_gateway;
2860		rt->rt_gw_family = ort->rt_gw_family;
2861		if (rt->rt_gw_family == AF_INET)
2862			rt->rt_gw4 = ort->rt_gw4;
2863		else if (rt->rt_gw_family == AF_INET6)
2864			rt->rt_gw6 = ort->rt_gw6;
2865
2866		INIT_LIST_HEAD(&rt->rt_uncached);
2867	}
2868
2869	dst_release(dst_orig);
2870
2871	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2872}
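/* The blackhole dst copies the identity fields of the original route but
 * discards all traffic (dst_discard/dst_discard_out) and is allocated
 * DST_OBSOLETE_DEAD, so any later validity check forces a fresh lookup.
 */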
2873
2874struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2875				    const struct sock *sk)
2876{
2877	struct rtable *rt = __ip_route_output_key(net, flp4);
2878
2879	if (IS_ERR(rt))
2880		return rt;
2881
2882	if (flp4->flowi4_proto) {
2883		flp4->flowi4_oif = rt->dst.dev->ifindex;
2884		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2885							flowi4_to_flowi(flp4),
2886							sk, 0);
2887	}
2888
2889	return rt;
2890}
2891EXPORT_SYMBOL_GPL(ip_route_output_flow);
2892
2893struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
2894				      struct net_device *dev,
2895				      struct net *net, __be32 *saddr,
2896				      const struct ip_tunnel_info *info,
2897				      u8 protocol, bool use_cache)
2898{
2899#ifdef CONFIG_DST_CACHE
2900	struct dst_cache *dst_cache;
2901#endif
2902	struct rtable *rt = NULL;
2903	struct flowi4 fl4;
2904	__u8 tos;
2905
2906#ifdef CONFIG_DST_CACHE
2907	dst_cache = (struct dst_cache *)&info->dst_cache;
2908	if (use_cache) {
2909		rt = dst_cache_get_ip4(dst_cache, saddr);
2910		if (rt)
2911			return rt;
2912	}
2913#endif
2914	memset(&fl4, 0, sizeof(fl4));
2915	fl4.flowi4_mark = skb->mark;
2916	fl4.flowi4_proto = protocol;
2917	fl4.daddr = info->key.u.ipv4.dst;
2918	fl4.saddr = info->key.u.ipv4.src;
2919	tos = info->key.tos;
2920	fl4.flowi4_tos = RT_TOS(tos);
2921
2922	rt = ip_route_output_key(net, &fl4);
2923	if (IS_ERR(rt)) {
2924		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
2925		return ERR_PTR(-ENETUNREACH);
2926	}
2927	if (rt->dst.dev == dev) { /* is this necessary? */
2928		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
2929		ip_rt_put(rt);
2930		return ERR_PTR(-ELOOP);
2931	}
2932#ifdef CONFIG_DST_CACHE
2933	if (use_cache)
2934		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
2935#endif
2936	*saddr = fl4.saddr;
2937	return rt;
2938}
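/* Tunnel output resolution: consult the per-tunnel dst_cache first when
 * permitted, otherwise route by the tunnel key, reject routes that would
 * loop back out of the tunnel device itself, and report the selected
 * source address through *saddr.
 */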
2939EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
2940
2941/* called with rcu_read_lock held */
2942static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2943			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2944			struct sk_buff *skb, u32 portid, u32 seq,
2945			unsigned int flags)
2946{
2947	struct rtmsg *r;
2948	struct nlmsghdr *nlh;
2949	unsigned long expires = 0;
2950	u32 error;
2951	u32 metrics[RTAX_MAX];
2952
2953	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2954	if (!nlh)
2955		return -EMSGSIZE;
2956
2957	r = nlmsg_data(nlh);
2958	r->rtm_family	 = AF_INET;
2959	r->rtm_dst_len	= 32;
2960	r->rtm_src_len	= 0;
2961	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
2962	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2963	if (nla_put_u32(skb, RTA_TABLE, table_id))
2964		goto nla_put_failure;
2965	r->rtm_type	= rt->rt_type;
2966	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2967	r->rtm_protocol = RTPROT_UNSPEC;
2968	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2969	if (rt->rt_flags & RTCF_NOTIFY)
2970		r->rtm_flags |= RTM_F_NOTIFY;
2971	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2972		r->rtm_flags |= RTCF_DOREDIRECT;
2973
2974	if (nla_put_in_addr(skb, RTA_DST, dst))
2975		goto nla_put_failure;
2976	if (src) {
2977		r->rtm_src_len = 32;
2978		if (nla_put_in_addr(skb, RTA_SRC, src))
2979			goto nla_put_failure;
2980	}
2981	if (rt->dst.dev &&
2982	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2983		goto nla_put_failure;
2984	if (rt->dst.lwtstate &&
2985	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2986		goto nla_put_failure;
2987#ifdef CONFIG_IP_ROUTE_CLASSID
2988	if (rt->dst.tclassid &&
2989	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2990		goto nla_put_failure;
2991#endif
2992	if (fl4 && !rt_is_input_route(rt) &&
2993	    fl4->saddr != src) {
2994		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2995			goto nla_put_failure;
2996	}
2997	if (rt->rt_uses_gateway) {
2998		if (rt->rt_gw_family == AF_INET &&
2999		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
3000			goto nla_put_failure;
3001		} else if (rt->rt_gw_family == AF_INET6) {
3002			int alen = sizeof(struct in6_addr);
3003			struct nlattr *nla;
3004			struct rtvia *via;
3005
3006			nla = nla_reserve(skb, RTA_VIA, alen + 2);
3007			if (!nla)
3008				goto nla_put_failure;
3009
3010			via = nla_data(nla);
3011			via->rtvia_family = AF_INET6;
3012			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
3013		}
3014	}
3015
3016	expires = rt->dst.expires;
3017	if (expires) {
3018		unsigned long now = jiffies;
3019
3020		if (time_before(now, expires))
3021			expires -= now;
3022		else
3023			expires = 0;
3024	}
3025
3026	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3027	if (rt->rt_pmtu && expires)
3028		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
3029	if (rt->rt_mtu_locked && expires)
3030		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
3031	if (rtnetlink_put_metrics(skb, metrics) < 0)
3032		goto nla_put_failure;
3033
3034	if (fl4) {
3035		if (fl4->flowi4_mark &&
3036		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
3037			goto nla_put_failure;
3038
3039		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
3040		    nla_put_u32(skb, RTA_UID,
3041				from_kuid_munged(current_user_ns(),
3042						 fl4->flowi4_uid)))
3043			goto nla_put_failure;
3044
3045		if (rt_is_input_route(rt)) {
3046#ifdef CONFIG_IP_MROUTE
3047			if (ipv4_is_multicast(dst) &&
3048			    !ipv4_is_local_multicast(dst) &&
3049			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3050				int err = ipmr_get_route(net, skb,
3051							 fl4->saddr, fl4->daddr,
3052							 r, portid);
3053
3054				if (err <= 0) {
3055					if (err == 0)
3056						return 0;
3057					goto nla_put_failure;
3058				}
3059			} else
3060#endif
3061				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3062					goto nla_put_failure;
3063		}
3064	}
3065
3066	error = rt->dst.error;
3067
3068	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3069		goto nla_put_failure;
3070
3071	nlmsg_end(skb, nlh);
3072	return 0;
3073
3074nla_put_failure:
3075	nlmsg_cancel(skb, nlh);
3076	return -EMSGSIZE;
3077}
3078
3079static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3080			    struct netlink_callback *cb, u32 table_id,
3081			    struct fnhe_hash_bucket *bucket, int genid,
3082			    int *fa_index, int fa_start, unsigned int flags)
3083{
3084	int i;
3085
3086	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3087		struct fib_nh_exception *fnhe;
3088
3089		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3090		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3091			struct rtable *rt;
3092			int err;
3093
3094			if (*fa_index < fa_start)
3095				goto next;
3096
3097			if (fnhe->fnhe_genid != genid)
3098				goto next;
3099
3100			if (fnhe->fnhe_expires &&
3101			    time_after(jiffies, fnhe->fnhe_expires))
3102				goto next;
3103
3104			rt = rcu_dereference(fnhe->fnhe_rth_input);
3105			if (!rt)
3106				rt = rcu_dereference(fnhe->fnhe_rth_output);
3107			if (!rt)
3108				goto next;
3109
3110			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3111					   table_id, NULL, skb,
3112					   NETLINK_CB(cb->skb).portid,
3113					   cb->nlh->nlmsg_seq, flags);
3114			if (err)
3115				return err;
3116next:
3117			(*fa_index)++;
3118		}
3119	}
3120
3121	return 0;
3122}
3123
3124int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3125		       u32 table_id, struct fib_info *fi,
3126		       int *fa_index, int fa_start, unsigned int flags)
3127{
3128	struct net *net = sock_net(cb->skb->sk);
3129	int nhsel, genid = fnhe_genid(net);
3130
3131	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3132		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3133		struct fnhe_hash_bucket *bucket;
3134		int err;
3135
3136		if (nhc->nhc_flags & RTNH_F_DEAD)
3137			continue;
3138
3139		rcu_read_lock();
3140		bucket = rcu_dereference(nhc->nhc_exceptions);
3141		err = 0;
3142		if (bucket)
3143			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3144					       genid, fa_index, fa_start,
3145					       flags);
3146		rcu_read_unlock();
3147		if (err)
3148			return err;
3149	}
3150
3151	return 0;
3152}
3153
3154static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3155						   u8 ip_proto, __be16 sport,
3156						   __be16 dport)
3157{
3158	struct sk_buff *skb;
3159	struct iphdr *iph;
3160
3161	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3162	if (!skb)
3163		return NULL;
3164
3165	/* Reserve room for dummy headers; this skb can pass
3166	 * through a good chunk of the routing engine.
3167	 */
3168	skb_reset_mac_header(skb);
3169	skb_reset_network_header(skb);
3170	skb->protocol = htons(ETH_P_IP);
3171	iph = skb_put(skb, sizeof(struct iphdr));
3172	iph->protocol = ip_proto;
3173	iph->saddr = src;
3174	iph->daddr = dst;
3175	iph->version = 0x4;
3176	iph->frag_off = 0;
3177	iph->ihl = 0x5;
3178	skb_set_transport_header(skb, skb->len);
3179
3180	switch (iph->protocol) {
3181	case IPPROTO_UDP: {
3182		struct udphdr *udph;
3183
3184		udph = skb_put_zero(skb, sizeof(struct udphdr));
3185		udph->source = sport;
3186		udph->dest = dport;
3187		udph->len = htons(sizeof(struct udphdr));
3188		udph->check = 0;
3189		break;
3190	}
3191	case IPPROTO_TCP: {
3192		struct tcphdr *tcph;
3193
3194		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3195		tcph->source	= sport;
3196		tcph->dest	= dport;
3197		tcph->doff	= sizeof(struct tcphdr) / 4;
3198		tcph->rst = 1;
3199		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3200					    src, dst, 0);
3201		break;
3202	}
3203	case IPPROTO_ICMP: {
3204		struct icmphdr *icmph;
3205
3206		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3207		icmph->type = ICMP_ECHO;
3208		icmph->code = 0;
3209	}
3210	}
3211
3212	return skb;
3213}
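/* The dummy skb built above carries just enough IP + L4 header for fib
 * rules and the flow dissector to see the requested protocol and ports,
 * so a getroute request exercises the same lookup path as a real packet.
 */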
3214
3215static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3216				       const struct nlmsghdr *nlh,
3217				       struct nlattr **tb,
3218				       struct netlink_ext_ack *extack)
3219{
3220	struct rtmsg *rtm;
3221	int i, err;
3222
3223	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3224		NL_SET_ERR_MSG(extack,
3225			       "ipv4: Invalid header for route get request");
3226		return -EINVAL;
3227	}
3228
3229	if (!netlink_strict_get_check(skb))
3230		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3231					      rtm_ipv4_policy, extack);
3232
3233	rtm = nlmsg_data(nlh);
3234	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3235	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3236	    rtm->rtm_table || rtm->rtm_protocol ||
3237	    rtm->rtm_scope || rtm->rtm_type) {
3238		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3239		return -EINVAL;
3240	}
3241
3242	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3243			       RTM_F_LOOKUP_TABLE |
3244			       RTM_F_FIB_MATCH)) {
3245		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3246		return -EINVAL;
3247	}
3248
3249	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3250					    rtm_ipv4_policy, extack);
3251	if (err)
3252		return err;
3253
3254	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3255	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3256		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3257		return -EINVAL;
3258	}
3259
3260	for (i = 0; i <= RTA_MAX; i++) {
3261		if (!tb[i])
3262			continue;
3263
3264		switch (i) {
3265		case RTA_IIF:
3266		case RTA_OIF:
3267		case RTA_SRC:
3268		case RTA_DST:
3269		case RTA_IP_PROTO:
3270		case RTA_SPORT:
3271		case RTA_DPORT:
3272		case RTA_MARK:
3273		case RTA_UID:
3274			break;
3275		default:
3276			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3277			return -EINVAL;
3278		}
3279	}
3280
3281	return 0;
3282}
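/* Under strict checking, only the attributes whitelisted above are
 * accepted and all unused rtmsg header fields must be zero; legacy
 * requests fall back to the lenient deprecated parser instead.
 */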
3283
3284static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3285			     struct netlink_ext_ack *extack)
3286{
3287	struct net *net = sock_net(in_skb->sk);
3288	struct nlattr *tb[RTA_MAX+1];
3289	u32 table_id = RT_TABLE_MAIN;
3290	__be16 sport = 0, dport = 0;
3291	struct fib_result res = {};
3292	u8 ip_proto = IPPROTO_UDP;
3293	struct rtable *rt = NULL;
3294	struct sk_buff *skb;
3295	struct rtmsg *rtm;
3296	struct flowi4 fl4 = {};
3297	__be32 dst = 0;
3298	__be32 src = 0;
3299	kuid_t uid;
3300	u32 iif;
3301	int err;
3302	int mark;
3303
3304	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3305	if (err < 0)
3306		return err;
3307
3308	rtm = nlmsg_data(nlh);
3309	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3310	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3311	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3312	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3313	if (tb[RTA_UID])
3314		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3315	else
3316		uid = (iif ? INVALID_UID : current_uid());
3317
3318	if (tb[RTA_IP_PROTO]) {
3319		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3320						  &ip_proto, AF_INET, extack);
3321		if (err)
3322			return err;
3323	}
3324
3325	if (tb[RTA_SPORT])
3326		sport = nla_get_be16(tb[RTA_SPORT]);
3327
3328	if (tb[RTA_DPORT])
3329		dport = nla_get_be16(tb[RTA_DPORT]);
3330
3331	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3332	if (!skb)
3333		return -ENOBUFS;
3334
3335	fl4.daddr = dst;
3336	fl4.saddr = src;
3337	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3338	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3339	fl4.flowi4_mark = mark;
3340	fl4.flowi4_uid = uid;
3341	if (sport)
3342		fl4.fl4_sport = sport;
3343	if (dport)
3344		fl4.fl4_dport = dport;
3345	fl4.flowi4_proto = ip_proto;
3346
3347	rcu_read_lock();
3348
3349	if (iif) {
3350		struct net_device *dev;
3351
3352		dev = dev_get_by_index_rcu(net, iif);
3353		if (!dev) {
3354			err = -ENODEV;
3355			goto errout_rcu;
3356		}
3357
3358		fl4.flowi4_iif = iif; /* for rt_fill_info */
3359		skb->dev	= dev;
3360		skb->mark	= mark;
3361		err = ip_route_input_rcu(skb, dst, src,
3362					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3363					 &res);
3364
3365		rt = skb_rtable(skb);
3366		if (err == 0 && rt->dst.error)
3367			err = -rt->dst.error;
3368	} else {
3369		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3370		skb->dev = net->loopback_dev;
3371		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3372		err = 0;
3373		if (IS_ERR(rt))
3374			err = PTR_ERR(rt);
3375		else
3376			skb_dst_set(skb, &rt->dst);
3377	}
3378
3379	if (err)
3380		goto errout_rcu;
3381
3382	if (rtm->rtm_flags & RTM_F_NOTIFY)
3383		rt->rt_flags |= RTCF_NOTIFY;
3384
3385	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3386		table_id = res.table ? res.table->tb_id : 0;
3387
3388	/* reset skb for netlink reply msg */
3389	skb_trim(skb, 0);
3390	skb_reset_network_header(skb);
3391	skb_reset_transport_header(skb);
3392	skb_reset_mac_header(skb);
3393
3394	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3395		struct fib_rt_info fri;
3396
3397		if (!res.fi) {
3398			err = fib_props[res.type].error;
3399			if (!err)
3400				err = -EHOSTUNREACH;
3401			goto errout_rcu;
3402		}
3403		fri.fi = res.fi;
3404		fri.tb_id = table_id;
3405		fri.dst = res.prefix;
3406		fri.dst_len = res.prefixlen;
3407		fri.tos = fl4.flowi4_tos;
3408		fri.type = rt->rt_type;
3409		fri.offload = 0;
3410		fri.trap = 0;
3411		fri.offload_failed = 0;
3412		if (res.fa_head) {
3413			struct fib_alias *fa;
3414
3415			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3416				u8 slen = 32 - fri.dst_len;
3417
3418				if (fa->fa_slen == slen &&
3419				    fa->tb_id == fri.tb_id &&
3420				    fa->fa_tos == fri.tos &&
3421				    fa->fa_info == res.fi &&
3422				    fa->fa_type == fri.type) {
3423					fri.offload = fa->offload;
3424					fri.trap = fa->trap;
3425					break;
3426				}
3427			}
3428		}
3429		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3430				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3431	} else {
3432		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3433				   NETLINK_CB(in_skb).portid,
3434				   nlh->nlmsg_seq, 0);
3435	}
3436	if (err < 0)
3437		goto errout_rcu;
3438
3439	rcu_read_unlock();
3440
3441	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3442
3443errout_free:
3444	return err;
3445errout_rcu:
3446	rcu_read_unlock();
3447	kfree_skb(skb);
3448	goto errout_free;
3449}
3450
3451void ip_rt_multicast_event(struct in_device *in_dev)
3452{
3453	rt_cache_flush(dev_net(in_dev->dev));
3454}
3455
3456#ifdef CONFIG_SYSCTL
3457static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3458static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3459static int ip_rt_gc_elasticity __read_mostly	= 8;
3460static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3461
3462static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3463		void *buffer, size_t *lenp, loff_t *ppos)
3464{
3465	struct net *net = (struct net *)__ctl->extra1;
3466
3467	if (write) {
3468		rt_cache_flush(net);
3469		fnhe_genid_bump(net);
3470		return 0;
3471	}
3472
3473	return -EINVAL;
3474}
3475
3476static struct ctl_table ipv4_route_table[] = {
3477	{
3478		.procname	= "gc_thresh",
3479		.data		= &ipv4_dst_ops.gc_thresh,
3480		.maxlen		= sizeof(int),
3481		.mode		= 0644,
3482		.proc_handler	= proc_dointvec,
3483	},
3484	{
3485		.procname	= "max_size",
3486		.data		= &ip_rt_max_size,
3487		.maxlen		= sizeof(int),
3488		.mode		= 0644,
3489		.proc_handler	= proc_dointvec,
3490	},
3491	{
3492		/* Deprecated. Use gc_min_interval_ms */
3493
3494		.procname	= "gc_min_interval",
3495		.data		= &ip_rt_gc_min_interval,
3496		.maxlen		= sizeof(int),
3497		.mode		= 0644,
3498		.proc_handler	= proc_dointvec_jiffies,
3499	},
3500	{
3501		.procname	= "gc_min_interval_ms",
3502		.data		= &ip_rt_gc_min_interval,
3503		.maxlen		= sizeof(int),
3504		.mode		= 0644,
3505		.proc_handler	= proc_dointvec_ms_jiffies,
3506	},
3507	{
3508		.procname	= "gc_timeout",
3509		.data		= &ip_rt_gc_timeout,
3510		.maxlen		= sizeof(int),
3511		.mode		= 0644,
3512		.proc_handler	= proc_dointvec_jiffies,
3513	},
3514	{
3515		.procname	= "gc_interval",
3516		.data		= &ip_rt_gc_interval,
3517		.maxlen		= sizeof(int),
3518		.mode		= 0644,
3519		.proc_handler	= proc_dointvec_jiffies,
3520	},
3521	{
3522		.procname	= "redirect_load",
3523		.data		= &ip_rt_redirect_load,
3524		.maxlen		= sizeof(int),
3525		.mode		= 0644,
3526		.proc_handler	= proc_dointvec,
3527	},
3528	{
3529		.procname	= "redirect_number",
3530		.data		= &ip_rt_redirect_number,
3531		.maxlen		= sizeof(int),
3532		.mode		= 0644,
3533		.proc_handler	= proc_dointvec,
3534	},
3535	{
3536		.procname	= "redirect_silence",
3537		.data		= &ip_rt_redirect_silence,
3538		.maxlen		= sizeof(int),
3539		.mode		= 0644,
3540		.proc_handler	= proc_dointvec,
3541	},
3542	{
3543		.procname	= "error_cost",
3544		.data		= &ip_rt_error_cost,
3545		.maxlen		= sizeof(int),
3546		.mode		= 0644,
3547		.proc_handler	= proc_dointvec,
3548	},
3549	{
3550		.procname	= "error_burst",
3551		.data		= &ip_rt_error_burst,
3552		.maxlen		= sizeof(int),
3553		.mode		= 0644,
3554		.proc_handler	= proc_dointvec,
3555	},
3556	{
3557		.procname	= "gc_elasticity",
3558		.data		= &ip_rt_gc_elasticity,
3559		.maxlen		= sizeof(int),
3560		.mode		= 0644,
3561		.proc_handler	= proc_dointvec,
3562	},
3563	{
3564		.procname	= "mtu_expires",
3565		.data		= &ip_rt_mtu_expires,
3566		.maxlen		= sizeof(int),
3567		.mode		= 0644,
3568		.proc_handler	= proc_dointvec_jiffies,
3569	},
3570	{
3571		.procname	= "min_pmtu",
3572		.data		= &ip_rt_min_pmtu,
3573		.maxlen		= sizeof(int),
3574		.mode		= 0644,
3575		.proc_handler	= proc_dointvec_minmax,
3576		.extra1		= &ip_min_valid_pmtu,
3577	},
3578	{
3579		.procname	= "min_adv_mss",
3580		.data		= &ip_rt_min_advmss,
3581		.maxlen		= sizeof(int),
3582		.mode		= 0644,
3583		.proc_handler	= proc_dointvec,
3584	},
3585	{ }
3586};
3587
3588static const char ipv4_route_flush_procname[] = "flush";
3589
3590static struct ctl_table ipv4_route_flush_table[] = {
3591	{
3592		.procname	= ipv4_route_flush_procname,
3593		.maxlen		= sizeof(int),
3594		.mode		= 0200,
3595		.proc_handler	= ipv4_sysctl_rtcache_flush,
3596	},
3597	{ },
3598};
3599
3600static __net_init int sysctl_route_net_init(struct net *net)
3601{
3602	struct ctl_table *tbl;
3603
3604	tbl = ipv4_route_flush_table;
3605	if (!net_eq(net, &init_net)) {
3606		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3607		if (!tbl)
3608			goto err_dup;
3609
3610		/* Don't export non-whitelisted sysctls to unprivileged users */
3611		if (net->user_ns != &init_user_ns) {
3612			if (tbl[0].procname != ipv4_route_flush_procname)
3613				tbl[0].procname = NULL;
3614		}
3615	}
3616	tbl[0].extra1 = net;
3617
3618	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3619	if (!net->ipv4.route_hdr)
3620		goto err_reg;
3621	return 0;
3622
3623err_reg:
3624	if (tbl != ipv4_route_flush_table)
3625		kfree(tbl);
3626err_dup:
3627	return -ENOMEM;
3628}
3629
3630static __net_exit void sysctl_route_net_exit(struct net *net)
3631{
3632	struct ctl_table *tbl;
3633
3634	tbl = net->ipv4.route_hdr->ctl_table_arg;
3635	unregister_net_sysctl_table(net->ipv4.route_hdr);
3636	BUG_ON(tbl == ipv4_route_flush_table);
3637	kfree(tbl);
3638}
3639
3640static __net_initdata struct pernet_operations sysctl_route_ops = {
3641	.init = sysctl_route_net_init,
3642	.exit = sysctl_route_net_exit,
3643};
3644#endif
3645
3646static __net_init int rt_genid_init(struct net *net)
3647{
3648	atomic_set(&net->ipv4.rt_genid, 0);
3649	atomic_set(&net->fnhe_genid, 0);
3650	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3651	return 0;
3652}
3653
3654static __net_initdata struct pernet_operations rt_genid_ops = {
3655	.init = rt_genid_init,
3656};
3657
3658static int __net_init ipv4_inetpeer_init(struct net *net)
3659{
3660	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3661
3662	if (!bp)
3663		return -ENOMEM;
3664	inet_peer_base_init(bp);
3665	net->ipv4.peers = bp;
3666	return 0;
3667}
3668
3669static void __net_exit ipv4_inetpeer_exit(struct net *net)
3670{
3671	struct inet_peer_base *bp = net->ipv4.peers;
3672
3673	net->ipv4.peers = NULL;
3674	inetpeer_invalidate_tree(bp);
3675	kfree(bp);
3676}
3677
3678static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3679	.init	=	ipv4_inetpeer_init,
3680	.exit	=	ipv4_inetpeer_exit,
3681};
3682
3683#ifdef CONFIG_IP_ROUTE_CLASSID
3684struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3685#endif /* CONFIG_IP_ROUTE_CLASSID */
3686
3687int __init ip_rt_init(void)
3688{
3689	void *idents_hash;
3690	int cpu;
3691
3692	/* For modern hosts, this will use 2 MB of memory */
3693	idents_hash = alloc_large_system_hash("IP idents",
3694					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
3695					      0,
3696					      16, /* one bucket per 64 KB */
3697					      HASH_ZERO,
3698					      NULL,
3699					      &ip_idents_mask,
3700					      2048,
3701					      256*1024);
3702
3703	ip_idents = idents_hash;
3704
3705	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3706
3707	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3708
3709	for_each_possible_cpu(cpu) {
3710		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3711
3712		INIT_LIST_HEAD(&ul->head);
3713		spin_lock_init(&ul->lock);
3714	}
3715#ifdef CONFIG_IP_ROUTE_CLASSID
3716	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3717	if (!ip_rt_acct)
3718		panic("IP: failed to allocate ip_rt_acct\n");
3719#endif
3720
3721	ipv4_dst_ops.kmem_cachep =
3722		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3723				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3724
3725	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3726
3727	if (dst_entries_init(&ipv4_dst_ops) < 0)
3728		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3729
3730	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3731		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3732
3733	ipv4_dst_ops.gc_thresh = ~0;
3734	ip_rt_max_size = INT_MAX;
3735
3736	devinet_init();
3737	ip_fib_init();
3738
3739	if (ip_rt_proc_init())
3740		pr_err("Unable to create route proc files\n");
3741#ifdef CONFIG_XFRM
3742	xfrm_init();
3743	xfrm4_init();
3744#endif
3745	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3746		      RTNL_FLAG_DOIT_UNLOCKED);
3747
3748#ifdef CONFIG_SYSCTL
3749	register_pernet_subsys(&sysctl_route_ops);
3750	#endif
3751	register_pernet_subsys(&rt_genid_ops);
3752	register_pernet_subsys(&ipv4_inetpeer_ops);
3753	return 0;
3754}
3755
3756#ifdef CONFIG_SYSCTL
3757/*
3758 * We really need to sanitize the damn ipv4 init order, then all
3759 * this nonsense will go away.
3760 */
3761void __init ip_static_sysctl_init(void)
3762{
3763	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3764}
3765#endif