   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		ROUTE - implementation of the IP router.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  11 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
  12 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  13 *
  14 * Fixes:
  15 *		Alan Cox	:	Verify area fixes.
  16 *		Alan Cox	:	cli() protects routing changes
  17 *		Rui Oliveira	:	ICMP routing table updates
  18 *		(rco@di.uminho.pt)	Routing table insertion and update
  19 *		Linus Torvalds	:	Rewrote bits to be sensible
  20 *		Alan Cox	:	Added BSD route gw semantics
  21 *		Alan Cox	:	Super /proc >4K
  22 *		Alan Cox	:	MTU in route table
  23 *		Alan Cox	: 	MSS actually. Also added the window
  24 *					clamper.
  25 *		Sam Lantinga	:	Fixed route matching in rt_del()
  26 *		Alan Cox	:	Routing cache support.
  27 *		Alan Cox	:	Removed compatibility cruft.
  28 *		Alan Cox	:	RTF_REJECT support.
  29 *		Alan Cox	:	TCP irtt support.
  30 *		Jonathan Naylor	:	Added Metric support.
  31 *	Miquel van Smoorenburg	:	BSD API fixes.
  32 *	Miquel van Smoorenburg	:	Metrics.
  33 *		Alan Cox	:	Use __u32 properly
  34 *		Alan Cox	:	Aligned routing errors more closely with BSD;
  35 *					our system is still very different.
  36 *		Alan Cox	:	Faster /proc handling
  37 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
  38 *					routing caches and better behaviour.
  39 *
  40 *		Olaf Erb	:	irtt wasn't being copied right.
  41 *		Bjorn Ekwall	:	Kerneld route support.
  42 *		Alan Cox	:	Multicast fixed (I hope)
  43 * 		Pavel Krauz	:	Limited broadcast fixed
  44 *		Mike McLagan	:	Routing by source
  45 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
  46 *					route.c and rewritten from scratch.
  47 *		Andi Kleen	:	Load-limit warning messages.
  48 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  49 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
  50 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
  51 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
  52 *		Marc Boucher	:	routing by fwmark
  53 *	Robert Olsson		:	Added rt_cache statistics
  54 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
  55 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
  56 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
  57 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
  58 *
  59 *		This program is free software; you can redistribute it and/or
  60 *		modify it under the terms of the GNU General Public License
  61 *		as published by the Free Software Foundation; either version
  62 *		2 of the License, or (at your option) any later version.
  63 */
  64
  65#define pr_fmt(fmt) "IPv4: " fmt
  66
  67#include <linux/module.h>
  68#include <asm/uaccess.h>
  69#include <linux/bitops.h>
  70#include <linux/types.h>
  71#include <linux/kernel.h>
  72#include <linux/mm.h>
  73#include <linux/bootmem.h>
  74#include <linux/string.h>
  75#include <linux/socket.h>
  76#include <linux/sockios.h>
  77#include <linux/errno.h>
  78#include <linux/in.h>
  79#include <linux/inet.h>
  80#include <linux/netdevice.h>
  81#include <linux/proc_fs.h>
  82#include <linux/init.h>
  83#include <linux/workqueue.h>
  84#include <linux/skbuff.h>
  85#include <linux/inetdevice.h>
  86#include <linux/igmp.h>
  87#include <linux/pkt_sched.h>
  88#include <linux/mroute.h>
  89#include <linux/netfilter_ipv4.h>
  90#include <linux/random.h>
  91#include <linux/jhash.h>
  92#include <linux/rcupdate.h>
  93#include <linux/times.h>
  94#include <linux/slab.h>
  95#include <linux/prefetch.h>
  96#include <net/dst.h>
  97#include <net/net_namespace.h>
  98#include <net/protocol.h>
  99#include <net/ip.h>
 100#include <net/route.h>
 101#include <net/inetpeer.h>
 102#include <net/sock.h>
 103#include <net/ip_fib.h>
 104#include <net/arp.h>
 105#include <net/tcp.h>
 106#include <net/icmp.h>
 107#include <net/xfrm.h>
 108#include <net/netevent.h>
 109#include <net/rtnetlink.h>
 110#ifdef CONFIG_SYSCTL
 111#include <linux/sysctl.h>
 112#include <linux/kmemleak.h>
 113#endif
 114#include <net/secure_seq.h>
 115
 116#define RT_FL_TOS(oldflp4) \
 117	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 118
 119#define IP_MAX_MTU	0xFFF0
 120
 121#define RT_GC_TIMEOUT (300*HZ)
 122
 123static int ip_rt_max_size;
 124static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
 125static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 126static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
 127static int ip_rt_redirect_number __read_mostly	= 9;
 128static int ip_rt_redirect_load __read_mostly	= HZ / 50;
 129static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
 130static int ip_rt_error_cost __read_mostly	= HZ;
 131static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 132static int ip_rt_gc_elasticity __read_mostly	= 8;
 133static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
 134static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
 135static int ip_rt_min_advmss __read_mostly	= 256;
 136static int rt_chain_length_max __read_mostly	= 20;
 137
 138static struct delayed_work expires_work;
 139static unsigned long expires_ljiffies;
 140
 141/*
 142 *	Interface to generic destination cache.
 143 */
 144
 145static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 146static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
 147static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
 148static void		 ipv4_dst_destroy(struct dst_entry *dst);
 149static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 150static void		 ipv4_link_failure(struct sk_buff *skb);
 151static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 152static int rt_garbage_collect(struct dst_ops *ops);
 153
 154static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 155			    int how)
 156{
 157}
 158
 159static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 160{
 161	struct rtable *rt = (struct rtable *) dst;
 162	struct inet_peer *peer;
 163	u32 *p = NULL;
 164
 165	if (!rt->peer)
 166		rt_bind_peer(rt, rt->rt_dst, 1);
 167
 168	peer = rt->peer;
 169	if (peer) {
 170		u32 *old_p = __DST_METRICS_PTR(old);
 171		unsigned long prev, new;
 172
 173		p = peer->metrics;
 174		if (inet_metrics_new(peer))
 175			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
 176
 177		new = (unsigned long) p;
 178		prev = cmpxchg(&dst->_metrics, old, new);
 179
 180		if (prev != old) {
 181			p = __DST_METRICS_PTR(prev);
 182			if (prev & DST_METRICS_READ_ONLY)
 183				p = NULL;
 184		} else {
 185			if (rt->fi) {
 186				fib_info_put(rt->fi);
 187				rt->fi = NULL;
 188			}
 189		}
 190	}
 191	return p;
 192}
 193
 194static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
 195
 196static struct dst_ops ipv4_dst_ops = {
 197	.family =		AF_INET,
 198	.protocol =		cpu_to_be16(ETH_P_IP),
 199	.gc =			rt_garbage_collect,
 200	.check =		ipv4_dst_check,
 201	.default_advmss =	ipv4_default_advmss,
 202	.mtu =			ipv4_mtu,
 203	.cow_metrics =		ipv4_cow_metrics,
 204	.destroy =		ipv4_dst_destroy,
 205	.ifdown =		ipv4_dst_ifdown,
 206	.negative_advice =	ipv4_negative_advice,
 207	.link_failure =		ipv4_link_failure,
 208	.update_pmtu =		ip_rt_update_pmtu,
 209	.local_out =		__ip_local_out,
 210	.neigh_lookup =		ipv4_neigh_lookup,
 211};
 212
 213#define ECN_OR_COST(class)	TC_PRIO_##class
 214
 215const __u8 ip_tos2prio[16] = {
 216	TC_PRIO_BESTEFFORT,
 217	ECN_OR_COST(BESTEFFORT),
 218	TC_PRIO_BESTEFFORT,
 219	ECN_OR_COST(BESTEFFORT),
 220	TC_PRIO_BULK,
 221	ECN_OR_COST(BULK),
 222	TC_PRIO_BULK,
 223	ECN_OR_COST(BULK),
 224	TC_PRIO_INTERACTIVE,
 225	ECN_OR_COST(INTERACTIVE),
 226	TC_PRIO_INTERACTIVE,
 227	ECN_OR_COST(INTERACTIVE),
 228	TC_PRIO_INTERACTIVE_BULK,
 229	ECN_OR_COST(INTERACTIVE_BULK),
 230	TC_PRIO_INTERACTIVE_BULK,
 231	ECN_OR_COST(INTERACTIVE_BULK)
 232};
 233EXPORT_SYMBOL(ip_tos2prio);
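
/*
 * Editorial sketch (not part of the original file): callers index this
 * table with the four TOS bits of the IP header shifted right by one,
 * which is what the kernel's rt_tos2priority() helper does.  The
 * function name below is hypothetical; it merely restates that mapping.
 */
static inline char example_tos2priority(u8 tos)
{
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}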
 234
 235/*
 236 * Route cache.
 237 */
 238
  239/* The locking scheme is rather straightforward:
 240 *
 241 * 1) Read-Copy Update protects the buckets of the central route hash.
 242 * 2) Only writers remove entries, and they hold the lock
 243 *    as they look at rtable reference counts.
 244 * 3) Only readers acquire references to rtable entries,
 245 *    they do so with atomic increments and with the
 246 *    lock held.
 247 */
 248
 249struct rt_hash_bucket {
 250	struct rtable __rcu	*chain;
 251};
 252
 253#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
 254	defined(CONFIG_PROVE_LOCKING)
 255/*
  256 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
  257 * The size of this table is a power of two and depends on the number of CPUs.
  258 * (On lockdep we have a quite big spinlock_t, so keep the size down there.)
 259 */
 260#ifdef CONFIG_LOCKDEP
 261# define RT_HASH_LOCK_SZ	256
 262#else
 263# if NR_CPUS >= 32
 264#  define RT_HASH_LOCK_SZ	4096
 265# elif NR_CPUS >= 16
 266#  define RT_HASH_LOCK_SZ	2048
 267# elif NR_CPUS >= 8
 268#  define RT_HASH_LOCK_SZ	1024
 269# elif NR_CPUS >= 4
 270#  define RT_HASH_LOCK_SZ	512
 271# else
 272#  define RT_HASH_LOCK_SZ	256
 273# endif
 274#endif
 275
 276static spinlock_t	*rt_hash_locks;
 277# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
 278
 279static __init void rt_hash_lock_init(void)
 280{
 281	int i;
 282
 283	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
 284			GFP_KERNEL);
 285	if (!rt_hash_locks)
 286		panic("IP: failed to allocate rt_hash_locks\n");
 287
 288	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
 289		spin_lock_init(&rt_hash_locks[i]);
 290}
 291#else
 292# define rt_hash_lock_addr(slot) NULL
 293
 294static inline void rt_hash_lock_init(void)
 295{
 296}
 297#endif
 298
 299static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
 300static unsigned int		rt_hash_mask __read_mostly;
 301static unsigned int		rt_hash_log  __read_mostly;
 302
 303static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 304#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
 305
 306static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
 307				   int genid)
 308{
 309	return jhash_3words((__force u32)daddr, (__force u32)saddr,
 310			    idx, genid)
 311		& rt_hash_mask;
 312}
 313
 314static inline int rt_genid(struct net *net)
 315{
 316	return atomic_read(&net->ipv4.rt_genid);
 317}
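
/*
 * Editorial sketch (an illustration, not code from this file): a cache
 * lookup hashes the flow together with the per-namespace generation id,
 * then walks the bucket chain locklessly under rcu_read_lock_bh() and
 * takes a reference only on a match.  The real input/output lookup
 * paths below follow this pattern (with more keys and an expiry check);
 * example_cache_lookup() is a hypothetical name.
 */
static struct rtable *example_cache_lookup(struct net *net, __be32 daddr,
					   __be32 saddr, int oif)
{
	unsigned int hash = rt_hash(daddr, saddr, oif, rt_genid(net));
	struct rtable *rth;

	rcu_read_lock_bh();
	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference_bh(rth->dst.rt_next)) {
		if (rth->rt_key_dst == daddr && rth->rt_key_src == saddr &&
		    rth->rt_oif == oif) {
			dst_use(&rth->dst, jiffies);	/* atomic refcount bump */
			break;
		}
	}
	rcu_read_unlock_bh();
	return rth;
}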
 318
 319#ifdef CONFIG_PROC_FS
 320struct rt_cache_iter_state {
 321	struct seq_net_private p;
 322	int bucket;
 323	int genid;
 324};
 325
 326static struct rtable *rt_cache_get_first(struct seq_file *seq)
 327{
 328	struct rt_cache_iter_state *st = seq->private;
 329	struct rtable *r = NULL;
 330
 331	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
 332		if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
 333			continue;
 334		rcu_read_lock_bh();
 335		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 336		while (r) {
 337			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
 338			    r->rt_genid == st->genid)
 339				return r;
 340			r = rcu_dereference_bh(r->dst.rt_next);
 341		}
 342		rcu_read_unlock_bh();
 343	}
 344	return r;
 345}
 346
 347static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 348					  struct rtable *r)
 349{
 350	struct rt_cache_iter_state *st = seq->private;
 351
 352	r = rcu_dereference_bh(r->dst.rt_next);
 353	while (!r) {
 354		rcu_read_unlock_bh();
 355		do {
 356			if (--st->bucket < 0)
 357				return NULL;
 358		} while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
 359		rcu_read_lock_bh();
 360		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 361	}
 362	return r;
 363}
 364
 365static struct rtable *rt_cache_get_next(struct seq_file *seq,
 366					struct rtable *r)
 367{
 368	struct rt_cache_iter_state *st = seq->private;
 369	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
 370		if (dev_net(r->dst.dev) != seq_file_net(seq))
 371			continue;
 372		if (r->rt_genid == st->genid)
 373			break;
 374	}
 375	return r;
 376}
 377
 378static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
 379{
 380	struct rtable *r = rt_cache_get_first(seq);
 381
 382	if (r)
 383		while (pos && (r = rt_cache_get_next(seq, r)))
 384			--pos;
 385	return pos ? NULL : r;
 386}
 387
 388static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 389{
 390	struct rt_cache_iter_state *st = seq->private;
 391	if (*pos)
 392		return rt_cache_get_idx(seq, *pos - 1);
 393	st->genid = rt_genid(seq_file_net(seq));
 394	return SEQ_START_TOKEN;
 395}
 396
 397static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 398{
 399	struct rtable *r;
 400
 401	if (v == SEQ_START_TOKEN)
 402		r = rt_cache_get_first(seq);
 403	else
 404		r = rt_cache_get_next(seq, v);
 405	++*pos;
 406	return r;
 407}
 408
 409static void rt_cache_seq_stop(struct seq_file *seq, void *v)
 410{
 411	if (v && v != SEQ_START_TOKEN)
 412		rcu_read_unlock_bh();
 413}
 414
 415static int rt_cache_seq_show(struct seq_file *seq, void *v)
 416{
 417	if (v == SEQ_START_TOKEN)
 418		seq_printf(seq, "%-127s\n",
 419			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
 420			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
 421			   "HHUptod\tSpecDst");
 422	else {
 423		struct rtable *r = v;
 424		struct neighbour *n;
 425		int len, HHUptod;
 426
 427		rcu_read_lock();
 428		n = dst_get_neighbour_noref(&r->dst);
 429		HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
 430		rcu_read_unlock();
 431
 432		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 433			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
 434			r->dst.dev ? r->dst.dev->name : "*",
 435			(__force u32)r->rt_dst,
 436			(__force u32)r->rt_gateway,
 437			r->rt_flags, atomic_read(&r->dst.__refcnt),
 438			r->dst.__use, 0, (__force u32)r->rt_src,
 439			dst_metric_advmss(&r->dst) + 40,
 440			dst_metric(&r->dst, RTAX_WINDOW),
 441			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
 442			      dst_metric(&r->dst, RTAX_RTTVAR)),
 443			r->rt_key_tos,
 444			-1,
 445			HHUptod,
 446			r->rt_spec_dst, &len);
 447
 448		seq_printf(seq, "%*s\n", 127 - len, "");
 449	}
 450	return 0;
 451}
 452
 453static const struct seq_operations rt_cache_seq_ops = {
 454	.start  = rt_cache_seq_start,
 455	.next   = rt_cache_seq_next,
 456	.stop   = rt_cache_seq_stop,
 457	.show   = rt_cache_seq_show,
 458};
 459
 460static int rt_cache_seq_open(struct inode *inode, struct file *file)
 461{
 462	return seq_open_net(inode, file, &rt_cache_seq_ops,
 463			sizeof(struct rt_cache_iter_state));
 464}
 465
 466static const struct file_operations rt_cache_seq_fops = {
 467	.owner	 = THIS_MODULE,
 468	.open	 = rt_cache_seq_open,
 469	.read	 = seq_read,
 470	.llseek	 = seq_lseek,
 471	.release = seq_release_net,
 472};
 473
 474
 475static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 476{
 477	int cpu;
 478
 479	if (*pos == 0)
 480		return SEQ_START_TOKEN;
 481
 482	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 483		if (!cpu_possible(cpu))
 484			continue;
 485		*pos = cpu+1;
 486		return &per_cpu(rt_cache_stat, cpu);
 487	}
 488	return NULL;
 489}
 490
 491static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 492{
 493	int cpu;
 494
 495	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 496		if (!cpu_possible(cpu))
 497			continue;
 498		*pos = cpu+1;
 499		return &per_cpu(rt_cache_stat, cpu);
 500	}
 501	return NULL;
 502
 503}
 504
 505static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
 506{
 507
 508}
 509
 510static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 511{
 512	struct rt_cache_stat *st = v;
 513
 514	if (v == SEQ_START_TOKEN) {
 515		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 516		return 0;
 517	}
 518
 519	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
 520		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
 521		   dst_entries_get_slow(&ipv4_dst_ops),
 522		   st->in_hit,
 523		   st->in_slow_tot,
 524		   st->in_slow_mc,
 525		   st->in_no_route,
 526		   st->in_brd,
 527		   st->in_martian_dst,
 528		   st->in_martian_src,
 529
 530		   st->out_hit,
 531		   st->out_slow_tot,
 532		   st->out_slow_mc,
 533
 534		   st->gc_total,
 535		   st->gc_ignored,
 536		   st->gc_goal_miss,
 537		   st->gc_dst_overflow,
 538		   st->in_hlist_search,
 539		   st->out_hlist_search
 540		);
 541	return 0;
 542}
 543
 544static const struct seq_operations rt_cpu_seq_ops = {
 545	.start  = rt_cpu_seq_start,
 546	.next   = rt_cpu_seq_next,
 547	.stop   = rt_cpu_seq_stop,
 548	.show   = rt_cpu_seq_show,
 549};
 550
 551
 552static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 553{
 554	return seq_open(file, &rt_cpu_seq_ops);
 555}
 556
 557static const struct file_operations rt_cpu_seq_fops = {
 558	.owner	 = THIS_MODULE,
 559	.open	 = rt_cpu_seq_open,
 560	.read	 = seq_read,
 561	.llseek	 = seq_lseek,
 562	.release = seq_release,
 563};
 564
 565#ifdef CONFIG_IP_ROUTE_CLASSID
 566static int rt_acct_proc_show(struct seq_file *m, void *v)
 567{
 568	struct ip_rt_acct *dst, *src;
 569	unsigned int i, j;
 570
 571	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
 572	if (!dst)
 573		return -ENOMEM;
 574
 575	for_each_possible_cpu(i) {
 576		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
 577		for (j = 0; j < 256; j++) {
 578			dst[j].o_bytes   += src[j].o_bytes;
 579			dst[j].o_packets += src[j].o_packets;
 580			dst[j].i_bytes   += src[j].i_bytes;
 581			dst[j].i_packets += src[j].i_packets;
 582		}
 583	}
 584
 585	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
 586	kfree(dst);
 587	return 0;
 588}
 589
 590static int rt_acct_proc_open(struct inode *inode, struct file *file)
 591{
 592	return single_open(file, rt_acct_proc_show, NULL);
 593}
 594
 595static const struct file_operations rt_acct_proc_fops = {
 596	.owner		= THIS_MODULE,
 597	.open		= rt_acct_proc_open,
 598	.read		= seq_read,
 599	.llseek		= seq_lseek,
 600	.release	= single_release,
 601};
 602#endif
 603
 604static int __net_init ip_rt_do_proc_init(struct net *net)
 605{
 606	struct proc_dir_entry *pde;
 607
 608	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
 609			&rt_cache_seq_fops);
 610	if (!pde)
 611		goto err1;
 612
 613	pde = proc_create("rt_cache", S_IRUGO,
 614			  net->proc_net_stat, &rt_cpu_seq_fops);
 615	if (!pde)
 616		goto err2;
 617
 618#ifdef CONFIG_IP_ROUTE_CLASSID
 619	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
 620	if (!pde)
 621		goto err3;
 622#endif
 623	return 0;
 624
 625#ifdef CONFIG_IP_ROUTE_CLASSID
 626err3:
 627	remove_proc_entry("rt_cache", net->proc_net_stat);
 628#endif
 629err2:
 630	remove_proc_entry("rt_cache", net->proc_net);
 631err1:
 632	return -ENOMEM;
 633}
 634
 635static void __net_exit ip_rt_do_proc_exit(struct net *net)
 636{
 637	remove_proc_entry("rt_cache", net->proc_net_stat);
 638	remove_proc_entry("rt_cache", net->proc_net);
 639#ifdef CONFIG_IP_ROUTE_CLASSID
 640	remove_proc_entry("rt_acct", net->proc_net);
 641#endif
 642}
 643
 644static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
 645	.init = ip_rt_do_proc_init,
 646	.exit = ip_rt_do_proc_exit,
 647};
 648
 649static int __init ip_rt_proc_init(void)
 650{
 651	return register_pernet_subsys(&ip_rt_proc_ops);
 652}
 653
 654#else
 655static inline int ip_rt_proc_init(void)
 656{
 657	return 0;
 658}
 659#endif /* CONFIG_PROC_FS */
 660
 661static inline void rt_free(struct rtable *rt)
 662{
 663	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 664}
 665
 666static inline void rt_drop(struct rtable *rt)
 667{
 668	ip_rt_put(rt);
 669	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 670}
 671
 672static inline int rt_fast_clean(struct rtable *rth)
 673{
  674	/* Kill broadcast/multicast entries very aggressively, if they
 675	   collide in hash table with more useful entries */
 676	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
 677		rt_is_input_route(rth) && rth->dst.rt_next;
 678}
 679
 680static inline int rt_valuable(struct rtable *rth)
 681{
 682	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
 683		(rth->peer && rth->peer->pmtu_expires);
 684}
 685
 686static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
 687{
 688	unsigned long age;
 689	int ret = 0;
 690
 691	if (atomic_read(&rth->dst.__refcnt))
 692		goto out;
 693
 694	age = jiffies - rth->dst.lastuse;
 695	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 696	    (age <= tmo2 && rt_valuable(rth)))
 697		goto out;
 698	ret = 1;
 699out:	return ret;
 700}
 701
 702/* Bits of score are:
 703 * 31: very valuable
 704 * 30: not quite useless
 705 * 29..0: usage counter
 706 */
 707static inline u32 rt_score(struct rtable *rt)
 708{
 709	u32 score = jiffies - rt->dst.lastuse;
 710
 711	score = ~score & ~(3<<30);
 712
 713	if (rt_valuable(rt))
 714		score |= (1<<31);
 715
 716	if (rt_is_output_route(rt) ||
 717	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
 718		score |= (1<<30);
 719
 720	return score;
 721}
 722
 723static inline bool rt_caching(const struct net *net)
 724{
 725	return net->ipv4.current_rt_cache_rebuild_count <=
 726		net->ipv4.sysctl_rt_cache_rebuild_count;
 727}
 728
 729static inline bool compare_hash_inputs(const struct rtable *rt1,
 730				       const struct rtable *rt2)
 731{
 732	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
 733		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
 734		(rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
 735}
 736
 737static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
 738{
 739	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
 740		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
 741		(rt1->rt_mark ^ rt2->rt_mark) |
 742		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
 743		(rt1->rt_route_iif ^ rt2->rt_route_iif) |
 744		(rt1->rt_oif ^ rt2->rt_oif)) == 0;
 745}
 746
 747static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 748{
 749	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
 750}
 751
 752static inline int rt_is_expired(struct rtable *rth)
 753{
 754	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 755}
 756
 757/*
  758 * Perform a full scan of the hash table and free all entries.
  759 * Can be called by a softirq or a process.
  760 * In the latter case, we want to reschedule if necessary.
 761 */
 762static void rt_do_flush(struct net *net, int process_context)
 763{
 764	unsigned int i;
 765	struct rtable *rth, *next;
 766
 767	for (i = 0; i <= rt_hash_mask; i++) {
 768		struct rtable __rcu **pprev;
 769		struct rtable *list;
 770
 771		if (process_context && need_resched())
 772			cond_resched();
 773		rth = rcu_access_pointer(rt_hash_table[i].chain);
 774		if (!rth)
 775			continue;
 776
 777		spin_lock_bh(rt_hash_lock_addr(i));
 778
 779		list = NULL;
 780		pprev = &rt_hash_table[i].chain;
 781		rth = rcu_dereference_protected(*pprev,
 782			lockdep_is_held(rt_hash_lock_addr(i)));
 783
 784		while (rth) {
 785			next = rcu_dereference_protected(rth->dst.rt_next,
 786				lockdep_is_held(rt_hash_lock_addr(i)));
 787
 788			if (!net ||
 789			    net_eq(dev_net(rth->dst.dev), net)) {
 790				rcu_assign_pointer(*pprev, next);
 791				rcu_assign_pointer(rth->dst.rt_next, list);
 792				list = rth;
 793			} else {
 794				pprev = &rth->dst.rt_next;
 795			}
 796			rth = next;
 797		}
 798
 799		spin_unlock_bh(rt_hash_lock_addr(i));
 800
 801		for (; list; list = next) {
 802			next = rcu_dereference_protected(list->dst.rt_next, 1);
 803			rt_free(list);
 804		}
 805	}
 806}
 807
 808/*
 809 * While freeing expired entries, we compute average chain length
 810 * and standard deviation, using fixed-point arithmetic.
  811 * This gives the estimation used for rt_chain_length_max:
  812 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
  813 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 814 */
 815
 816#define FRACT_BITS 3
 817#define ONE (1UL << FRACT_BITS)
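
/*
 * Editorial worked example (not in the original): with FRACT_BITS == 3,
 * each entry counted by has_noalias() contributes ONE == 8 to a chain's
 * fixed-point length.  If a scan samples chains of 2, 2 and 8 distinct
 * entries, sum = 96 and sum2 = 4608 over samples = 3, so avg = 32,
 * sd = int_sqrt(4608/3 - 32*32) = 22, and
 * rt_chain_length_max = max(8, (32 + 4*22) >> 3) = 15.
 */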
 818
 819/*
 820 * Given a hash chain and an item in this hash chain,
  821 * determine whether a previous entry has the same hash inputs
  822 * (but differs on tos, mark or oif).
 823 * Returns 0 if an alias is found.
 824 * Returns ONE if rth has no alias before itself.
 825 */
 826static int has_noalias(const struct rtable *head, const struct rtable *rth)
 827{
 828	const struct rtable *aux = head;
 829
 830	while (aux != rth) {
 831		if (compare_hash_inputs(aux, rth))
 832			return 0;
 833		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
 834	}
 835	return ONE;
 836}
 837
 838static void rt_check_expire(void)
 839{
 840	static unsigned int rover;
 841	unsigned int i = rover, goal;
 842	struct rtable *rth;
 843	struct rtable __rcu **rthp;
 844	unsigned long samples = 0;
 845	unsigned long sum = 0, sum2 = 0;
 846	unsigned long delta;
 847	u64 mult;
 848
 849	delta = jiffies - expires_ljiffies;
 850	expires_ljiffies = jiffies;
 851	mult = ((u64)delta) << rt_hash_log;
 852	if (ip_rt_gc_timeout > 1)
 853		do_div(mult, ip_rt_gc_timeout);
 854	goal = (unsigned int)mult;
 855	if (goal > rt_hash_mask)
 856		goal = rt_hash_mask + 1;
 857	for (; goal > 0; goal--) {
 858		unsigned long tmo = ip_rt_gc_timeout;
 859		unsigned long length;
 860
 861		i = (i + 1) & rt_hash_mask;
 862		rthp = &rt_hash_table[i].chain;
 863
 864		if (need_resched())
 865			cond_resched();
 866
 867		samples++;
 868
 869		if (rcu_dereference_raw(*rthp) == NULL)
 870			continue;
 871		length = 0;
 872		spin_lock_bh(rt_hash_lock_addr(i));
 873		while ((rth = rcu_dereference_protected(*rthp,
 874					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
 875			prefetch(rth->dst.rt_next);
 876			if (rt_is_expired(rth)) {
 877				*rthp = rth->dst.rt_next;
 878				rt_free(rth);
 879				continue;
 880			}
 881			if (rth->dst.expires) {
 882				/* Entry is expired even if it is in use */
 883				if (time_before_eq(jiffies, rth->dst.expires)) {
 884nofree:
 885					tmo >>= 1;
 886					rthp = &rth->dst.rt_next;
 887					/*
 888					 * We only count entries on
 889					 * a chain with equal hash inputs once
 890					 * so that entries for different QOS
 891					 * levels, and other non-hash input
 892					 * attributes don't unfairly skew
 893					 * the length computation
 894					 */
 895					length += has_noalias(rt_hash_table[i].chain, rth);
 896					continue;
 897				}
 898			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
 899				goto nofree;
 900
 901			/* Cleanup aged off entries. */
 902			*rthp = rth->dst.rt_next;
 903			rt_free(rth);
 904		}
 905		spin_unlock_bh(rt_hash_lock_addr(i));
 906		sum += length;
 907		sum2 += length*length;
 908	}
 909	if (samples) {
 910		unsigned long avg = sum / samples;
 911		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
 912		rt_chain_length_max = max_t(unsigned long,
 913					ip_rt_gc_elasticity,
 914					(avg + 4*sd) >> FRACT_BITS);
 915	}
 916	rover = i;
 917}
 918
 919/*
 920 * rt_worker_func() is run in process context.
 921 * we call rt_check_expire() to scan part of the hash table
 922 */
 923static void rt_worker_func(struct work_struct *work)
 924{
 925	rt_check_expire();
 926	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
 927}
 928
 929/*
 930 * Perturbation of rt_genid by a small quantity [1..256]
  931 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
  932 * many times (2^24) without reusing a recent rt_genid.
  933 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 934 */
 935static void rt_cache_invalidate(struct net *net)
 936{
 937	unsigned char shuffle;
 938
 939	get_random_bytes(&shuffle, sizeof(shuffle));
 940	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
 941	inetpeer_invalidate_tree(AF_INET);
 942}
 943
 944/*
 945 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 946 * delay >= 0 : invalidate & flush cache (can be long)
 947 */
 948void rt_cache_flush(struct net *net, int delay)
 949{
 950	rt_cache_invalidate(net);
 951	if (delay >= 0)
 952		rt_do_flush(net, !in_softirq());
 953}
 954
 955/* Flush previous cache invalidated entries from the cache */
 956void rt_cache_flush_batch(struct net *net)
 957{
 958	rt_do_flush(net, !in_softirq());
 959}
 960
 961static void rt_emergency_hash_rebuild(struct net *net)
 962{
 963	net_warn_ratelimited("Route hash chain too long!\n");
 964	rt_cache_invalidate(net);
 965}
 966
 967/*
 968   Short description of GC goals.
 969
  970   We want to build an algorithm that keeps the routing cache
  971   at an equilibrium point, where the number of aged-off entries
  972   stays approximately equal to the number of newly generated ones.
  973
  974   The current expiration strength is the variable "expire".
  975   We try to adjust it dynamically, so that when the network is
  976   idle "expire" is large enough to keep plenty of warm entries,
  977   and when load increases it shrinks to limit the cache size.
 978 */
 979
 980static int rt_garbage_collect(struct dst_ops *ops)
 981{
 982	static unsigned long expire = RT_GC_TIMEOUT;
 983	static unsigned long last_gc;
 984	static int rover;
 985	static int equilibrium;
 986	struct rtable *rth;
 987	struct rtable __rcu **rthp;
 988	unsigned long now = jiffies;
 989	int goal;
 990	int entries = dst_entries_get_fast(&ipv4_dst_ops);
 991
 992	/*
 993	 * Garbage collection is pretty expensive,
 994	 * do not make it too frequently.
 995	 */
 996
 997	RT_CACHE_STAT_INC(gc_total);
 998
 999	if (now - last_gc < ip_rt_gc_min_interval &&
1000	    entries < ip_rt_max_size) {
1001		RT_CACHE_STAT_INC(gc_ignored);
1002		goto out;
1003	}
1004
1005	entries = dst_entries_get_slow(&ipv4_dst_ops);
 1006	/* Calculate the number of entries we want to expire now. */
1007	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
1008	if (goal <= 0) {
1009		if (equilibrium < ipv4_dst_ops.gc_thresh)
1010			equilibrium = ipv4_dst_ops.gc_thresh;
1011		goal = entries - equilibrium;
1012		if (goal > 0) {
1013			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
1014			goal = entries - equilibrium;
1015		}
1016	} else {
 1017		/* We are in a dangerous area. Try to reduce the cache really
1018		 * aggressively.
1019		 */
1020		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
1021		equilibrium = entries - goal;
1022	}
1023
1024	if (now - last_gc >= ip_rt_gc_min_interval)
1025		last_gc = now;
1026
1027	if (goal <= 0) {
1028		equilibrium += goal;
1029		goto work_done;
1030	}
1031
1032	do {
1033		int i, k;
1034
1035		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
1036			unsigned long tmo = expire;
1037
1038			k = (k + 1) & rt_hash_mask;
1039			rthp = &rt_hash_table[k].chain;
1040			spin_lock_bh(rt_hash_lock_addr(k));
1041			while ((rth = rcu_dereference_protected(*rthp,
1042					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
1043				if (!rt_is_expired(rth) &&
1044					!rt_may_expire(rth, tmo, expire)) {
1045					tmo >>= 1;
1046					rthp = &rth->dst.rt_next;
1047					continue;
1048				}
1049				*rthp = rth->dst.rt_next;
1050				rt_free(rth);
1051				goal--;
1052			}
1053			spin_unlock_bh(rt_hash_lock_addr(k));
1054			if (goal <= 0)
1055				break;
1056		}
1057		rover = k;
1058
1059		if (goal <= 0)
1060			goto work_done;
1061
 1062		/* Goal is not achieved. We stop the process if:
 1063
 1064		   - expire was reduced to zero (otherwise expire is halved);
 1065		   - the table is not full;
 1066		   - we are called from interrupt context.
 1067		   The jiffies check is just a fallback/debug loop breaker;
 1068		     we will not spin here for a long time in any case.
 1069		 */
1070
1071		RT_CACHE_STAT_INC(gc_goal_miss);
1072
1073		if (expire == 0)
1074			break;
1075
1076		expire >>= 1;
1077
1078		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1079			goto out;
1080	} while (!in_softirq() && time_before_eq(jiffies, now));
1081
1082	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1083		goto out;
1084	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1085		goto out;
1086	net_warn_ratelimited("dst cache overflow\n");
1087	RT_CACHE_STAT_INC(gc_dst_overflow);
1088	return 1;
1089
1090work_done:
1091	expire += ip_rt_gc_min_interval;
1092	if (expire > ip_rt_gc_timeout ||
1093	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
1094	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1095		expire = ip_rt_gc_timeout;
1096out:	return 0;
1097}
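
/*
 * Editorial worked example (not in the original): with a 2^17-bucket
 * hash table (rt_hash_log == 17) and the default ip_rt_gc_elasticity
 * of 8, the initial goal only goes positive once the cache exceeds
 * 8 << 17 == 1048576 entries; below that threshold the collector
 * merely raises "equilibrium" and leaves through work_done, where
 * "expire" is relaxed again.
 */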
1098
1099/*
1100 * Returns number of entries in a hash chain that have different hash_inputs
1101 */
1102static int slow_chain_length(const struct rtable *head)
1103{
1104	int length = 0;
1105	const struct rtable *rth = head;
1106
1107	while (rth) {
1108		length += has_noalias(head, rth);
1109		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
1110	}
1111	return length >> FRACT_BITS;
1112}
1113
1114static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
1115{
1116	static const __be32 inaddr_any = 0;
1117	struct net_device *dev = dst->dev;
1118	const __be32 *pkey = daddr;
1119	const struct rtable *rt;
1120	struct neighbour *n;
1121
1122	rt = (const struct rtable *) dst;
1123
1124	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1125		pkey = &inaddr_any;
1126	else if (rt->rt_gateway)
1127		pkey = (const __be32 *) &rt->rt_gateway;
1128
1129	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
1130	if (n)
1131		return n;
1132	return neigh_create(&arp_tbl, pkey, dev);
1133}
1134
1135static int rt_bind_neighbour(struct rtable *rt)
1136{
1137	struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1138	if (IS_ERR(n))
1139		return PTR_ERR(n);
1140	dst_set_neighbour(&rt->dst, n);
1141
1142	return 0;
1143}
1144
1145static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1146				     struct sk_buff *skb, int ifindex)
1147{
1148	struct rtable	*rth, *cand;
1149	struct rtable __rcu **rthp, **candp;
1150	unsigned long	now;
1151	u32 		min_score;
1152	int		chain_length;
1153	int attempts = !in_softirq();
1154
1155restart:
1156	chain_length = 0;
1157	min_score = ~(u32)0;
1158	cand = NULL;
1159	candp = NULL;
1160	now = jiffies;
1161
1162	if (!rt_caching(dev_net(rt->dst.dev))) {
1163		/*
1164		 * If we're not caching, just tell the caller we
1165		 * were successful and don't touch the route.  The
 1166		 * caller holds the sole reference to the cache entry, and
1167		 * it will be released when the caller is done with it.
1168		 * If we drop it here, the callers have no way to resolve routes
1169		 * when we're not caching.  Instead, just point *rp at rt, so
1170		 * the caller gets a single use out of the route
1171		 * Note that we do rt_free on this new route entry, so that
1172		 * once its refcount hits zero, we are still able to reap it
1173		 * (Thanks Alexey)
1174		 * Note: To avoid expensive rcu stuff for this uncached dst,
1175		 * we set DST_NOCACHE so that dst_release() can free dst without
1176		 * waiting a grace period.
1177		 */
1178
1179		rt->dst.flags |= DST_NOCACHE;
1180		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1181			int err = rt_bind_neighbour(rt);
1182			if (err) {
1183				net_warn_ratelimited("Neighbour table failure & not caching routes\n");
1184				ip_rt_put(rt);
1185				return ERR_PTR(err);
1186			}
1187		}
1188
1189		goto skip_hashing;
1190	}
1191
1192	rthp = &rt_hash_table[hash].chain;
1193
1194	spin_lock_bh(rt_hash_lock_addr(hash));
1195	while ((rth = rcu_dereference_protected(*rthp,
1196			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1197		if (rt_is_expired(rth)) {
1198			*rthp = rth->dst.rt_next;
1199			rt_free(rth);
1200			continue;
1201		}
1202		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1203			/* Put it first */
1204			*rthp = rth->dst.rt_next;
1205			/*
1206			 * Since lookup is lockfree, the deletion
1207			 * must be visible to another weakly ordered CPU before
1208			 * the insertion at the start of the hash chain.
1209			 */
1210			rcu_assign_pointer(rth->dst.rt_next,
1211					   rt_hash_table[hash].chain);
1212			/*
1213			 * Since lookup is lockfree, the update writes
1214			 * must be ordered for consistency on SMP.
1215			 */
1216			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1217
1218			dst_use(&rth->dst, now);
1219			spin_unlock_bh(rt_hash_lock_addr(hash));
1220
1221			rt_drop(rt);
1222			if (skb)
1223				skb_dst_set(skb, &rth->dst);
1224			return rth;
1225		}
1226
1227		if (!atomic_read(&rth->dst.__refcnt)) {
1228			u32 score = rt_score(rth);
1229
1230			if (score <= min_score) {
1231				cand = rth;
1232				candp = rthp;
1233				min_score = score;
1234			}
1235		}
1236
1237		chain_length++;
1238
1239		rthp = &rth->dst.rt_next;
1240	}
1241
1242	if (cand) {
 1243		/* ip_rt_gc_elasticity used to be the average chain length;
 1244		 * when exceeded, gc becomes really aggressive.
1245		 *
1246		 * The second limit is less certain. At the moment it allows
1247		 * only 2 entries per bucket. We will see.
1248		 */
1249		if (chain_length > ip_rt_gc_elasticity) {
1250			*candp = cand->dst.rt_next;
1251			rt_free(cand);
1252		}
1253	} else {
1254		if (chain_length > rt_chain_length_max &&
1255		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1256			struct net *net = dev_net(rt->dst.dev);
1257			int num = ++net->ipv4.current_rt_cache_rebuild_count;
1258			if (!rt_caching(net)) {
1259				pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
1260					rt->dst.dev->name, num);
1261			}
1262			rt_emergency_hash_rebuild(net);
1263			spin_unlock_bh(rt_hash_lock_addr(hash));
1264
1265			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1266					ifindex, rt_genid(net));
1267			goto restart;
1268		}
1269	}
1270
 1271	/* Try to bind the route to ARP only if it is an output
 1272	   route or a unicast forwarding path.
1273	 */
1274	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1275		int err = rt_bind_neighbour(rt);
1276		if (err) {
1277			spin_unlock_bh(rt_hash_lock_addr(hash));
1278
1279			if (err != -ENOBUFS) {
1280				rt_drop(rt);
1281				return ERR_PTR(err);
1282			}
1283
 1284			/* Neighbour tables are full and nothing
 1285			   can be released. Try to shrink the route cache;
 1286			   most likely it holds some neighbour records.
1287			 */
1288			if (attempts-- > 0) {
1289				int saved_elasticity = ip_rt_gc_elasticity;
1290				int saved_int = ip_rt_gc_min_interval;
1291				ip_rt_gc_elasticity	= 1;
1292				ip_rt_gc_min_interval	= 0;
1293				rt_garbage_collect(&ipv4_dst_ops);
1294				ip_rt_gc_min_interval	= saved_int;
1295				ip_rt_gc_elasticity	= saved_elasticity;
1296				goto restart;
1297			}
1298
1299			net_warn_ratelimited("Neighbour table overflow\n");
1300			rt_drop(rt);
1301			return ERR_PTR(-ENOBUFS);
1302		}
1303	}
1304
1305	rt->dst.rt_next = rt_hash_table[hash].chain;
1306
1307	/*
1308	 * Since lookup is lockfree, we must make sure
1309	 * previous writes to rt are committed to memory
 1310	 * before making rt visible to other CPUs.
1311	 */
1312	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1313
1314	spin_unlock_bh(rt_hash_lock_addr(hash));
1315
1316skip_hashing:
1317	if (skb)
1318		skb_dst_set(skb, &rt->dst);
1319	return rt;
1320}
1321
1322static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1323
1324static u32 rt_peer_genid(void)
1325{
1326	return atomic_read(&__rt_peer_genid);
1327}
1328
1329void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1330{
1331	struct inet_peer *peer;
1332
1333	peer = inet_getpeer_v4(daddr, create);
1334
1335	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1336		inet_putpeer(peer);
1337	else
1338		rt->rt_peer_genid = rt_peer_genid();
1339}
1340
1341/*
 1342 * Peer allocation may fail only in serious out-of-memory conditions.  However,
 1343 * we can still generate some output.
 1344 * Random ID selection looks a bit dangerous because we have no chance of
 1345 * selecting an ID that is unique over a reasonable period of time.
 1346 * But a broken packet identifier may be better than no packet at all.
1347 */
1348static void ip_select_fb_ident(struct iphdr *iph)
1349{
1350	static DEFINE_SPINLOCK(ip_fb_id_lock);
1351	static u32 ip_fallback_id;
1352	u32 salt;
1353
1354	spin_lock_bh(&ip_fb_id_lock);
1355	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1356	iph->id = htons(salt & 0xFFFF);
1357	ip_fallback_id = salt;
1358	spin_unlock_bh(&ip_fb_id_lock);
1359}
1360
1361void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1362{
1363	struct rtable *rt = (struct rtable *) dst;
1364
1365	if (rt && !(rt->dst.flags & DST_NOPEER)) {
1366		if (rt->peer == NULL)
1367			rt_bind_peer(rt, rt->rt_dst, 1);
1368
 1369		/* If the peer is attached to the destination, it is never
 1370		   detached, so we need not grab a lock to dereference it.
 1371		 */
1372		if (rt->peer) {
1373			iph->id = htons(inet_getid(rt->peer, more));
1374			return;
1375		}
1376	} else if (!rt)
1377		pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
1378
1379	ip_select_fb_ident(iph);
1380}
1381EXPORT_SYMBOL(__ip_select_ident);
1382
1383static void rt_del(unsigned int hash, struct rtable *rt)
1384{
1385	struct rtable __rcu **rthp;
1386	struct rtable *aux;
1387
1388	rthp = &rt_hash_table[hash].chain;
1389	spin_lock_bh(rt_hash_lock_addr(hash));
1390	ip_rt_put(rt);
1391	while ((aux = rcu_dereference_protected(*rthp,
1392			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1393		if (aux == rt || rt_is_expired(aux)) {
1394			*rthp = aux->dst.rt_next;
1395			rt_free(aux);
1396			continue;
1397		}
1398		rthp = &aux->dst.rt_next;
1399	}
1400	spin_unlock_bh(rt_hash_lock_addr(hash));
1401}
1402
1403static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1404{
1405	struct rtable *rt = (struct rtable *) dst;
1406	__be32 orig_gw = rt->rt_gateway;
1407	struct neighbour *n, *old_n;
1408
1409	dst_confirm(&rt->dst);
1410
1411	rt->rt_gateway = peer->redirect_learned.a4;
1412
1413	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1414	if (IS_ERR(n)) {
1415		rt->rt_gateway = orig_gw;
1416		return;
1417	}
1418	old_n = xchg(&rt->dst._neighbour, n);
1419	if (old_n)
1420		neigh_release(old_n);
1421	if (!(n->nud_state & NUD_VALID)) {
1422		neigh_event_send(n, NULL);
1423	} else {
1424		rt->rt_flags |= RTCF_REDIRECTED;
1425		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1426	}
1427}
1428
1429/* called in rcu_read_lock() section */
1430void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1431		    __be32 saddr, struct net_device *dev)
1432{
1433	int s, i;
1434	struct in_device *in_dev = __in_dev_get_rcu(dev);
1435	__be32 skeys[2] = { saddr, 0 };
1436	int    ikeys[2] = { dev->ifindex, 0 };
1437	struct inet_peer *peer;
1438	struct net *net;
1439
1440	if (!in_dev)
1441		return;
1442
1443	net = dev_net(dev);
1444	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1445	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1446	    ipv4_is_zeronet(new_gw))
1447		goto reject_redirect;
1448
1449	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1450		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1451			goto reject_redirect;
1452		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1453			goto reject_redirect;
1454	} else {
1455		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1456			goto reject_redirect;
1457	}
1458
1459	for (s = 0; s < 2; s++) {
1460		for (i = 0; i < 2; i++) {
1461			unsigned int hash;
1462			struct rtable __rcu **rthp;
1463			struct rtable *rt;
1464
1465			hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
1466
1467			rthp = &rt_hash_table[hash].chain;
1468
1469			while ((rt = rcu_dereference(*rthp)) != NULL) {
1470				rthp = &rt->dst.rt_next;
1471
1472				if (rt->rt_key_dst != daddr ||
1473				    rt->rt_key_src != skeys[s] ||
1474				    rt->rt_oif != ikeys[i] ||
1475				    rt_is_input_route(rt) ||
1476				    rt_is_expired(rt) ||
1477				    !net_eq(dev_net(rt->dst.dev), net) ||
1478				    rt->dst.error ||
1479				    rt->dst.dev != dev ||
1480				    rt->rt_gateway != old_gw)
1481					continue;
1482
1483				if (!rt->peer)
1484					rt_bind_peer(rt, rt->rt_dst, 1);
1485
1486				peer = rt->peer;
1487				if (peer) {
1488					if (peer->redirect_learned.a4 != new_gw) {
1489						peer->redirect_learned.a4 = new_gw;
1490						atomic_inc(&__rt_peer_genid);
1491					}
1492					check_peer_redir(&rt->dst, peer);
1493				}
1494			}
1495		}
1496	}
1497	return;
1498
1499reject_redirect:
1500#ifdef CONFIG_IP_ROUTE_VERBOSE
1501	if (IN_DEV_LOG_MARTIANS(in_dev))
1502		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
1503				     "  Advised path = %pI4 -> %pI4\n",
1504				     &old_gw, dev->name, &new_gw,
1505				     &saddr, &daddr);
1506#endif
1507	;
1508}
1509
1510static bool peer_pmtu_expired(struct inet_peer *peer)
1511{
1512	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1513
1514	return orig &&
1515	       time_after_eq(jiffies, orig) &&
1516	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1517}
1518
1519static bool peer_pmtu_cleaned(struct inet_peer *peer)
1520{
1521	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1522
1523	return orig &&
1524	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1525}
1526
1527static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1528{
1529	struct rtable *rt = (struct rtable *)dst;
1530	struct dst_entry *ret = dst;
1531
1532	if (rt) {
1533		if (dst->obsolete > 0) {
1534			ip_rt_put(rt);
1535			ret = NULL;
1536		} else if (rt->rt_flags & RTCF_REDIRECTED) {
1537			unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1538						rt->rt_oif,
1539						rt_genid(dev_net(dst->dev)));
1540			rt_del(hash, rt);
1541			ret = NULL;
1542		} else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1543			dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1544		}
1545	}
1546	return ret;
1547}
1548
1549/*
1550 * Algorithm:
1551 *	1. The first ip_rt_redirect_number redirects are sent
1552 *	   with exponential backoff, then we stop sending them at all,
1553 *	   assuming that the host ignores our redirects.
1554 *	2. If we did not see packets requiring redirects
1555 *	   during ip_rt_redirect_silence, we assume that the host
 1556 *	   forgot the redirected route, and start sending redirects again.
1557 *
1558 * This algorithm is much cheaper and more intelligent than dumb load limiting
1559 * in icmp.c.
1560 *
1561 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1562 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1563 */
1564
1565void ip_rt_send_redirect(struct sk_buff *skb)
1566{
1567	struct rtable *rt = skb_rtable(skb);
1568	struct in_device *in_dev;
1569	struct inet_peer *peer;
1570	int log_martians;
1571
1572	rcu_read_lock();
1573	in_dev = __in_dev_get_rcu(rt->dst.dev);
1574	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1575		rcu_read_unlock();
1576		return;
1577	}
1578	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1579	rcu_read_unlock();
1580
1581	if (!rt->peer)
1582		rt_bind_peer(rt, rt->rt_dst, 1);
1583	peer = rt->peer;
1584	if (!peer) {
1585		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1586		return;
1587	}
1588
1589	/* No redirected packets during ip_rt_redirect_silence;
1590	 * reset the algorithm.
1591	 */
1592	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1593		peer->rate_tokens = 0;
1594
1595	/* Too many ignored redirects; do not send anything
 1596	 * and set peer->rate_last to the last seen redirected packet.
1597	 */
1598	if (peer->rate_tokens >= ip_rt_redirect_number) {
1599		peer->rate_last = jiffies;
1600		return;
1601	}
1602
1603	/* Check for load limit; set rate_last to the latest sent
1604	 * redirect.
1605	 */
1606	if (peer->rate_tokens == 0 ||
1607	    time_after(jiffies,
1608		       (peer->rate_last +
1609			(ip_rt_redirect_load << peer->rate_tokens)))) {
1610		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1611		peer->rate_last = jiffies;
1612		++peer->rate_tokens;
1613#ifdef CONFIG_IP_ROUTE_VERBOSE
1614		if (log_martians &&
1615		    peer->rate_tokens == ip_rt_redirect_number)
1616			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
1617					     &ip_hdr(skb)->saddr, rt->rt_iif,
1618					     &rt->rt_dst, &rt->rt_gateway);
1619#endif
1620	}
1621}
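
/*
 * Editorial worked example (not in the original): with the defaults
 * above, the k-th redirect to an unresponsive host is sent no sooner
 * than (HZ/50) << k jiffies after the previous one, so the gap doubles
 * each time; after ip_rt_redirect_number (9) redirects we go silent
 * until ip_rt_redirect_silence ((HZ/50) << 10 jiffies) elapses with no
 * redirect-worthy traffic, at which point rate_tokens is reset.
 */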
1622
1623static int ip_error(struct sk_buff *skb)
1624{
1625	struct rtable *rt = skb_rtable(skb);
1626	struct inet_peer *peer;
1627	unsigned long now;
1628	bool send;
1629	int code;
1630
1631	switch (rt->dst.error) {
1632	case EINVAL:
1633	default:
1634		goto out;
1635	case EHOSTUNREACH:
1636		code = ICMP_HOST_UNREACH;
1637		break;
1638	case ENETUNREACH:
1639		code = ICMP_NET_UNREACH;
1640		IP_INC_STATS_BH(dev_net(rt->dst.dev),
1641				IPSTATS_MIB_INNOROUTES);
1642		break;
1643	case EACCES:
1644		code = ICMP_PKT_FILTERED;
1645		break;
1646	}
1647
1648	if (!rt->peer)
1649		rt_bind_peer(rt, rt->rt_dst, 1);
1650	peer = rt->peer;
1651
1652	send = true;
1653	if (peer) {
1654		now = jiffies;
1655		peer->rate_tokens += now - peer->rate_last;
1656		if (peer->rate_tokens > ip_rt_error_burst)
1657			peer->rate_tokens = ip_rt_error_burst;
1658		peer->rate_last = now;
1659		if (peer->rate_tokens >= ip_rt_error_cost)
1660			peer->rate_tokens -= ip_rt_error_cost;
1661		else
1662			send = false;
1663	}
1664	if (send)
1665		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1666
1667out:	kfree_skb(skb);
1668	return 0;
1669}
1670
1671/*
1672 *	The last two values are not from the RFC but
1673 *	are needed for AMPRnet AX.25 paths.
1674 */
1675
1676static const unsigned short mtu_plateau[] =
1677{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1678
1679static inline unsigned short guess_mtu(unsigned short old_mtu)
1680{
1681	int i;
1682
1683	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1684		if (old_mtu > mtu_plateau[i])
1685			return mtu_plateau[i];
1686	return 68;
1687}
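
/*
 * Editorial worked example (not in the original): if a broken router
 * reports a zero MTU for a 1500-byte datagram, guess_mtu(1500) walks
 * the plateau table to the first value below the old MTU and returns
 * 1492; it falls back to the RFC 791 minimum of 68 only when even the
 * smallest plateau (128) is too big.
 */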
1688
1689unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1690				 unsigned short new_mtu,
1691				 struct net_device *dev)
1692{
1693	unsigned short old_mtu = ntohs(iph->tot_len);
1694	unsigned short est_mtu = 0;
1695	struct inet_peer *peer;
1696
1697	peer = inet_getpeer_v4(iph->daddr, 1);
1698	if (peer) {
1699		unsigned short mtu = new_mtu;
1700
1701		if (new_mtu < 68 || new_mtu >= old_mtu) {
1702			/* BSD 4.2 derived systems incorrectly adjust
1703			 * tot_len by the IP header length, and report
1704			 * a zero MTU in the ICMP message.
1705			 */
1706			if (mtu == 0 &&
1707			    old_mtu >= 68 + (iph->ihl << 2))
1708				old_mtu -= iph->ihl << 2;
1709			mtu = guess_mtu(old_mtu);
1710		}
1711
1712		if (mtu < ip_rt_min_pmtu)
1713			mtu = ip_rt_min_pmtu;
1714		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1715			unsigned long pmtu_expires;
1716
1717			pmtu_expires = jiffies + ip_rt_mtu_expires;
1718			if (!pmtu_expires)
1719				pmtu_expires = 1UL;
1720
1721			est_mtu = mtu;
1722			peer->pmtu_learned = mtu;
1723			peer->pmtu_expires = pmtu_expires;
1724			atomic_inc(&__rt_peer_genid);
1725		}
1726
1727		inet_putpeer(peer);
1728	}
1729	return est_mtu ? : new_mtu;
1730}
1731
1732static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1733{
1734	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1735
1736	if (!expires)
1737		return;
1738	if (time_before(jiffies, expires)) {
1739		u32 orig_dst_mtu = dst_mtu(dst);
1740		if (peer->pmtu_learned < orig_dst_mtu) {
1741			if (!peer->pmtu_orig)
1742				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1743			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1744		}
1745	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1746		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1747}
1748
1749static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1750{
1751	struct rtable *rt = (struct rtable *) dst;
1752	struct inet_peer *peer;
1753
1754	dst_confirm(dst);
1755
1756	if (!rt->peer)
1757		rt_bind_peer(rt, rt->rt_dst, 1);
1758	peer = rt->peer;
1759	if (peer) {
1760		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1761
1762		if (mtu < ip_rt_min_pmtu)
1763			mtu = ip_rt_min_pmtu;
1764		if (!pmtu_expires || mtu < peer->pmtu_learned) {
1765
1766			pmtu_expires = jiffies + ip_rt_mtu_expires;
1767			if (!pmtu_expires)
1768				pmtu_expires = 1UL;
1769
1770			peer->pmtu_learned = mtu;
1771			peer->pmtu_expires = pmtu_expires;
1772
1773			atomic_inc(&__rt_peer_genid);
1774			rt->rt_peer_genid = rt_peer_genid();
1775		}
1776		check_peer_pmtu(dst, peer);
1777	}
1778}
1779
1780
1781static void ipv4_validate_peer(struct rtable *rt)
1782{
1783	if (rt->rt_peer_genid != rt_peer_genid()) {
1784		struct inet_peer *peer;
1785
1786		if (!rt->peer)
1787			rt_bind_peer(rt, rt->rt_dst, 0);
1788
1789		peer = rt->peer;
1790		if (peer) {
1791			check_peer_pmtu(&rt->dst, peer);
1792
1793			if (peer->redirect_learned.a4 &&
1794			    peer->redirect_learned.a4 != rt->rt_gateway)
1795				check_peer_redir(&rt->dst, peer);
1796		}
1797
1798		rt->rt_peer_genid = rt_peer_genid();
1799	}
1800}
1801
1802static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1803{
1804	struct rtable *rt = (struct rtable *) dst;
1805
1806	if (rt_is_expired(rt))
1807		return NULL;
1808	ipv4_validate_peer(rt);
1809	return dst;
1810}
1811
1812static void ipv4_dst_destroy(struct dst_entry *dst)
1813{
1814	struct rtable *rt = (struct rtable *) dst;
1815	struct inet_peer *peer = rt->peer;
1816
1817	if (rt->fi) {
1818		fib_info_put(rt->fi);
1819		rt->fi = NULL;
1820	}
1821	if (peer) {
1822		rt->peer = NULL;
1823		inet_putpeer(peer);
1824	}
1825}
1826
1827
1828static void ipv4_link_failure(struct sk_buff *skb)
1829{
1830	struct rtable *rt;
1831
1832	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1833
1834	rt = skb_rtable(skb);
1835	if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1836		dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1837}
1838
1839static int ip_rt_bug(struct sk_buff *skb)
1840{
1841	pr_debug("%s: %pI4 -> %pI4, %s\n",
1842		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1843		 skb->dev ? skb->dev->name : "?");
1844	kfree_skb(skb);
1845	WARN_ON(1);
1846	return 0;
1847}
1848
1849/*
 1850   We do not cache the source address of the outgoing interface,
 1851   because it is used only by the IP RR, TS and SRR options,
 1852   so it is out of the fast path.
 1853
 1854   BTW remember: "addr" is allowed to be unaligned
 1855   in IP options!
1856 */
1857
1858void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1859{
1860	__be32 src;
1861
1862	if (rt_is_output_route(rt))
1863		src = ip_hdr(skb)->saddr;
1864	else {
1865		struct fib_result res;
1866		struct flowi4 fl4;
1867		struct iphdr *iph;
1868
1869		iph = ip_hdr(skb);
1870
1871		memset(&fl4, 0, sizeof(fl4));
1872		fl4.daddr = iph->daddr;
1873		fl4.saddr = iph->saddr;
1874		fl4.flowi4_tos = RT_TOS(iph->tos);
1875		fl4.flowi4_oif = rt->dst.dev->ifindex;
1876		fl4.flowi4_iif = skb->dev->ifindex;
1877		fl4.flowi4_mark = skb->mark;
1878
1879		rcu_read_lock();
1880		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1881			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1882		else
1883			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1884					RT_SCOPE_UNIVERSE);
1885		rcu_read_unlock();
1886	}
1887	memcpy(addr, &src, 4);
1888}
1889
1890#ifdef CONFIG_IP_ROUTE_CLASSID
1891static void set_class_tag(struct rtable *rt, u32 tag)
1892{
1893	if (!(rt->dst.tclassid & 0xFFFF))
1894		rt->dst.tclassid |= tag & 0xFFFF;
1895	if (!(rt->dst.tclassid & 0xFFFF0000))
1896		rt->dst.tclassid |= tag & 0xFFFF0000;
1897}
1898#endif
1899
1900static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1901{
1902	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1903
1904	if (advmss == 0) {
1905		advmss = max_t(unsigned int, dst->dev->mtu - 40,
1906			       ip_rt_min_advmss);
1907		if (advmss > 65535 - 40)
1908			advmss = 65535 - 40;
1909	}
1910	return advmss;
1911}
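/* Worked example: for a 1500-byte Ethernet MTU with no ADVMSS metric set,
 * advmss = 1500 - 40 = 1460, where 40 bytes covers the minimal IPv4 and
 * TCP headers; the result is never below ip_rt_min_advmss (256 by
 * default) and never above 65535 - 40 = 65495.
 */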
1912
1913static unsigned int ipv4_mtu(const struct dst_entry *dst)
1914{
1915	const struct rtable *rt = (const struct rtable *) dst;
1916	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1917
1918	if (mtu && rt_is_output_route(rt))
1919		return mtu;
1920
1921	mtu = dst->dev->mtu;
1922
1923	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1924
1925		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1926			mtu = 576;
1927	}
1928
1929	if (mtu > IP_MAX_MTU)
1930		mtu = IP_MAX_MTU;
1931
1932	return mtu;
1933}
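/* Note on the 576 clamp above: when the MTU metric is locked and the
 * route is gatewayed (rt_gateway != rt_dst), the MTU is capped at 576,
 * the minimum datagram size RFC 791 requires every IPv4 host to accept.
 */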
1934
1935static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1936			    struct fib_info *fi)
1937{
1938	struct inet_peer *peer;
1939	int create = 0;
1940
1941	/* If a peer entry exists for this destination, we must hook
1942	 * it up in order to get at cached metrics.
1943	 */
1944	if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1945		create = 1;
1946
1947	rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1948	if (peer) {
1949		rt->rt_peer_genid = rt_peer_genid();
1950		if (inet_metrics_new(peer))
1951			memcpy(peer->metrics, fi->fib_metrics,
1952			       sizeof(u32) * RTAX_MAX);
1953		dst_init_metrics(&rt->dst, peer->metrics, false);
1954
1955		check_peer_pmtu(&rt->dst, peer);
1956
1957		if (peer->redirect_learned.a4 &&
1958		    peer->redirect_learned.a4 != rt->rt_gateway) {
1959			rt->rt_gateway = peer->redirect_learned.a4;
1960			rt->rt_flags |= RTCF_REDIRECTED;
1961		}
1962	} else {
1963		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1964			rt->fi = fi;
1965			atomic_inc(&fi->fib_clntref);
1966		}
1967		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1968	}
1969}
1970
1971static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1972			   const struct fib_result *res,
1973			   struct fib_info *fi, u16 type, u32 itag)
1974{
1975	struct dst_entry *dst = &rt->dst;
1976
1977	if (fi) {
1978		if (FIB_RES_GW(*res) &&
1979		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1980			rt->rt_gateway = FIB_RES_GW(*res);
1981		rt_init_metrics(rt, fl4, fi);
1982#ifdef CONFIG_IP_ROUTE_CLASSID
1983		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1984#endif
1985	}
1986
1987	if (dst_mtu(dst) > IP_MAX_MTU)
1988		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1989	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1990		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1991
1992#ifdef CONFIG_IP_ROUTE_CLASSID
1993#ifdef CONFIG_IP_MULTIPLE_TABLES
1994	set_class_tag(rt, fib_rules_tclass(res));
1995#endif
1996	set_class_tag(rt, itag);
1997#endif
1998}
1999
2000static struct rtable *rt_dst_alloc(struct net_device *dev,
2001				   bool nopolicy, bool noxfrm)
2002{
2003	return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
2004			 DST_HOST |
2005			 (nopolicy ? DST_NOPOLICY : 0) |
2006			 (noxfrm ? DST_NOXFRM : 0));
2007}
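/* A sketch of the dst_alloc() arguments, assuming the generic dst API
 * semantics apply unchanged here: 1 is the initial reference count and
 * -1 the initial "obsolete" state (forcing ->check() on cache lookups),
 * while DST_NOPOLICY and DST_NOXFRM opt the entry out of policy routing
 * and IPsec transformation respectively.
 */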
2008
2009/* called in rcu_read_lock() section */
2010static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2011				u8 tos, struct net_device *dev, int our)
2012{
2013	unsigned int hash;
2014	struct rtable *rth;
2015	__be32 spec_dst;
2016	struct in_device *in_dev = __in_dev_get_rcu(dev);
2017	u32 itag = 0;
2018	int err;
2019
2020	/* Primary sanity checks. */
2021
2022	if (in_dev == NULL)
2023		return -EINVAL;
2024
2025	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2026	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
2027		goto e_inval;
2028
2029	if (ipv4_is_zeronet(saddr)) {
2030		if (!ipv4_is_local_multicast(daddr))
2031			goto e_inval;
2032		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2033	} else {
2034		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2035					  &itag);
2036		if (err < 0)
2037			goto e_err;
2038	}
2039	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
2040			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2041	if (!rth)
2042		goto e_nobufs;
2043
2044#ifdef CONFIG_IP_ROUTE_CLASSID
2045	rth->dst.tclassid = itag;
2046#endif
2047	rth->dst.output = ip_rt_bug;
2048
2049	rth->rt_key_dst	= daddr;
2050	rth->rt_key_src	= saddr;
2051	rth->rt_genid	= rt_genid(dev_net(dev));
2052	rth->rt_flags	= RTCF_MULTICAST;
2053	rth->rt_type	= RTN_MULTICAST;
2054	rth->rt_key_tos	= tos;
2055	rth->rt_dst	= daddr;
2056	rth->rt_src	= saddr;
2057	rth->rt_route_iif = dev->ifindex;
2058	rth->rt_iif	= dev->ifindex;
2059	rth->rt_oif	= 0;
2060	rth->rt_mark    = skb->mark;
2061	rth->rt_gateway	= daddr;
2062	rth->rt_spec_dst= spec_dst;
2063	rth->rt_peer_genid = 0;
2064	rth->peer = NULL;
2065	rth->fi = NULL;
2066	if (our) {
2067		rth->dst.input= ip_local_deliver;
2068		rth->rt_flags |= RTCF_LOCAL;
2069	}
2070
2071#ifdef CONFIG_IP_MROUTE
2072	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
2073		rth->dst.input = ip_mr_input;
2074#endif
2075	RT_CACHE_STAT_INC(in_slow_mc);
2076
2077	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
2078	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
2079	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
2080
2081e_nobufs:
2082	return -ENOBUFS;
2083e_inval:
2084	return -EINVAL;
2085e_err:
2086	return err;
2087}
2088
2089
2090static void ip_handle_martian_source(struct net_device *dev,
2091				     struct in_device *in_dev,
2092				     struct sk_buff *skb,
2093				     __be32 daddr,
2094				     __be32 saddr)
2095{
2096	RT_CACHE_STAT_INC(in_martian_src);
2097#ifdef CONFIG_IP_ROUTE_VERBOSE
2098	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
2099		/*
2100		 *	Per the RFC 1812 recommendation, if the source is
2101		 *	martian, the only hint we can give is the MAC header.
2102		 */
2103		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
2104			&daddr, &saddr, dev->name);
2105		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
2106			print_hex_dump(KERN_WARNING, "ll header: ",
2107				       DUMP_PREFIX_OFFSET, 16, 1,
2108				       skb_mac_header(skb),
2109				       dev->hard_header_len, true);
2110		}
2111	}
2112#endif
2113}
2114
2115/* called in rcu_read_lock() section */
2116static int __mkroute_input(struct sk_buff *skb,
2117			   const struct fib_result *res,
2118			   struct in_device *in_dev,
2119			   __be32 daddr, __be32 saddr, u32 tos,
2120			   struct rtable **result)
2121{
2122	struct rtable *rth;
2123	int err;
2124	struct in_device *out_dev;
2125	unsigned int flags = 0;
2126	__be32 spec_dst;
2127	u32 itag;
2128
2129	/* get a working reference to the output device */
2130	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
2131	if (out_dev == NULL) {
2132		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
2133		return -EINVAL;
2134	}
2135
2136
2137	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
2138				  in_dev->dev, &spec_dst, &itag);
2139	if (err < 0) {
2140		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
2141					 saddr);
2142
2143		goto cleanup;
2144	}
2145
2146	if (err)
2147		flags |= RTCF_DIRECTSRC;
2148
2149	if (out_dev == in_dev && err &&
2150	    (IN_DEV_SHARED_MEDIA(out_dev) ||
2151	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2152		flags |= RTCF_DOREDIRECT;
2153
2154	if (skb->protocol != htons(ETH_P_IP)) {
2155		/* Not IP (i.e. ARP). Do not create route, if it is
2156		 * invalid for proxy arp. DNAT routes are always valid.
2157		 *
2158		 * Proxy arp feature have been extended to allow, ARP
2159		 * replies back to the same interface, to support
2160		 * Private VLAN switch technologies. See arp.c.
2161		 */
2162		if (out_dev == in_dev &&
2163		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
2164			err = -EINVAL;
2165			goto cleanup;
2166		}
2167	}
2168
2169	rth = rt_dst_alloc(out_dev->dev,
2170			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
2171			   IN_DEV_CONF_GET(out_dev, NOXFRM));
2172	if (!rth) {
2173		err = -ENOBUFS;
2174		goto cleanup;
2175	}
2176
2177	rth->rt_key_dst	= daddr;
2178	rth->rt_key_src	= saddr;
2179	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2180	rth->rt_flags = flags;
2181	rth->rt_type = res->type;
2182	rth->rt_key_tos	= tos;
2183	rth->rt_dst	= daddr;
2184	rth->rt_src	= saddr;
2185	rth->rt_route_iif = in_dev->dev->ifindex;
2186	rth->rt_iif 	= in_dev->dev->ifindex;
2187	rth->rt_oif 	= 0;
2188	rth->rt_mark    = skb->mark;
2189	rth->rt_gateway	= daddr;
2190	rth->rt_spec_dst= spec_dst;
2191	rth->rt_peer_genid = 0;
2192	rth->peer = NULL;
2193	rth->fi = NULL;
2194
2195	rth->dst.input = ip_forward;
2196	rth->dst.output = ip_output;
2197
2198	rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2199
2200	*result = rth;
2201	err = 0;
2202 cleanup:
2203	return err;
2204}
2205
2206static int ip_mkroute_input(struct sk_buff *skb,
2207			    struct fib_result *res,
2208			    const struct flowi4 *fl4,
2209			    struct in_device *in_dev,
2210			    __be32 daddr, __be32 saddr, u32 tos)
2211{
2212	struct rtable *rth = NULL;
2213	int err;
2214	unsigned int hash;
2215
2216#ifdef CONFIG_IP_ROUTE_MULTIPATH
2217	if (res->fi && res->fi->fib_nhs > 1)
2218		fib_select_multipath(res);
2219#endif
2220
2221	/* create a routing cache entry */
2222	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2223	if (err)
2224		return err;
2225
2226	/* put it into the cache */
2227	hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2228		       rt_genid(dev_net(rth->dst.dev)));
2229	rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2230	if (IS_ERR(rth))
2231		return PTR_ERR(rth);
2232	return 0;
2233}
2234
2235/*
2236 *	NOTE. We drop all packets that have a local source
2237 *	address, because every properly looped-back packet
2238 *	must already have the correct destination attached by the output routine.
2239 *
2240 *	This approach solves two big problems:
2241 *	1. Non-simplex devices are handled properly.
2242 *	2. IP spoofing attempts are filtered with a 100% guarantee.
2243 *	called with rcu_read_lock()
2244 */
2245
2246static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2247			       u8 tos, struct net_device *dev)
2248{
2249	struct fib_result res;
2250	struct in_device *in_dev = __in_dev_get_rcu(dev);
2251	struct flowi4	fl4;
2252	unsigned int	flags = 0;
2253	u32		itag = 0;
2254	struct rtable	*rth;
2255	unsigned int	hash;
2256	__be32		spec_dst;
2257	int		err = -EINVAL;
2258	struct net    *net = dev_net(dev);
2259
2260	/* IP on this device is disabled. */
2261
2262	if (!in_dev)
2263		goto out;
2264
2265	/* Check for the weirdest martians, which cannot be
2266	   detected by fib_lookup.
2267	 */
2268
2269	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2270	    ipv4_is_loopback(saddr))
2271		goto martian_source;
2272
2273	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2274		goto brd_input;
2275
2276	/* Accept zero addresses only to limited broadcast;
2277	 * I am not even sure whether to fix this or not. Waiting for complaints :-)
2278	 */
2279	if (ipv4_is_zeronet(saddr))
2280		goto martian_source;
2281
2282	if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2283		goto martian_destination;
2284
2285	/*
2286	 *	Now we are ready to route the packet.
2287	 */
2288	fl4.flowi4_oif = 0;
2289	fl4.flowi4_iif = dev->ifindex;
2290	fl4.flowi4_mark = skb->mark;
2291	fl4.flowi4_tos = tos;
2292	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2293	fl4.daddr = daddr;
2294	fl4.saddr = saddr;
2295	err = fib_lookup(net, &fl4, &res);
2296	if (err != 0) {
2297		if (!IN_DEV_FORWARD(in_dev))
2298			goto e_hostunreach;
2299		goto no_route;
2300	}
2301
2302	RT_CACHE_STAT_INC(in_slow_tot);
2303
2304	if (res.type == RTN_BROADCAST)
2305		goto brd_input;
2306
2307	if (res.type == RTN_LOCAL) {
2308		err = fib_validate_source(skb, saddr, daddr, tos,
2309					  net->loopback_dev->ifindex,
2310					  dev, &spec_dst, &itag);
2311		if (err < 0)
2312			goto martian_source_keep_err;
2313		if (err)
2314			flags |= RTCF_DIRECTSRC;
2315		spec_dst = daddr;
2316		goto local_input;
2317	}
2318
2319	if (!IN_DEV_FORWARD(in_dev))
2320		goto e_hostunreach;
2321	if (res.type != RTN_UNICAST)
2322		goto martian_destination;
2323
2324	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2325out:	return err;
2326
2327brd_input:
2328	if (skb->protocol != htons(ETH_P_IP))
2329		goto e_inval;
2330
2331	if (ipv4_is_zeronet(saddr))
2332		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2333	else {
2334		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2335					  &itag);
2336		if (err < 0)
2337			goto martian_source_keep_err;
2338		if (err)
2339			flags |= RTCF_DIRECTSRC;
2340	}
2341	flags |= RTCF_BROADCAST;
2342	res.type = RTN_BROADCAST;
2343	RT_CACHE_STAT_INC(in_brd);
2344
2345local_input:
2346	rth = rt_dst_alloc(net->loopback_dev,
2347			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2348	if (!rth)
2349		goto e_nobufs;
2350
2351	rth->dst.input= ip_local_deliver;
2352	rth->dst.output= ip_rt_bug;
2353#ifdef CONFIG_IP_ROUTE_CLASSID
2354	rth->dst.tclassid = itag;
2355#endif
2356
2357	rth->rt_key_dst	= daddr;
2358	rth->rt_key_src	= saddr;
2359	rth->rt_genid = rt_genid(net);
2360	rth->rt_flags 	= flags|RTCF_LOCAL;
2361	rth->rt_type	= res.type;
2362	rth->rt_key_tos	= tos;
2363	rth->rt_dst	= daddr;
2364	rth->rt_src	= saddr;
2368	rth->rt_route_iif = dev->ifindex;
2369	rth->rt_iif	= dev->ifindex;
2370	rth->rt_oif	= 0;
2371	rth->rt_mark    = skb->mark;
2372	rth->rt_gateway	= daddr;
2373	rth->rt_spec_dst= spec_dst;
2374	rth->rt_peer_genid = 0;
2375	rth->peer = NULL;
2376	rth->fi = NULL;
2377	if (res.type == RTN_UNREACHABLE) {
2378		rth->dst.input= ip_error;
2379		rth->dst.error= -err;
2380		rth->rt_flags 	&= ~RTCF_LOCAL;
2381	}
2382	hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2383	rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2384	err = 0;
2385	if (IS_ERR(rth))
2386		err = PTR_ERR(rth);
2387	goto out;
2388
2389no_route:
2390	RT_CACHE_STAT_INC(in_no_route);
2391	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2392	res.type = RTN_UNREACHABLE;
2393	if (err == -ESRCH)
2394		err = -ENETUNREACH;
2395	goto local_input;
2396
2397	/*
2398	 *	Do not cache martian addresses: they should be logged (RFC1812)
2399	 */
2400martian_destination:
2401	RT_CACHE_STAT_INC(in_martian_dst);
2402#ifdef CONFIG_IP_ROUTE_VERBOSE
2403	if (IN_DEV_LOG_MARTIANS(in_dev))
2404		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2405				     &daddr, &saddr, dev->name);
2406#endif
2407
2408e_hostunreach:
2409	err = -EHOSTUNREACH;
2410	goto out;
2411
2412e_inval:
2413	err = -EINVAL;
2414	goto out;
2415
2416e_nobufs:
2417	err = -ENOBUFS;
2418	goto out;
2419
2420martian_source:
2421	err = -EINVAL;
2422martian_source_keep_err:
2423	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2424	goto out;
2425}
2426
2427int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2428			   u8 tos, struct net_device *dev, bool noref)
2429{
2430	struct rtable	*rth;
2431	unsigned int	hash;
2432	int iif = dev->ifindex;
2433	struct net *net;
2434	int res;
2435
2436	net = dev_net(dev);
2437
2438	rcu_read_lock();
2439
2440	if (!rt_caching(net))
2441		goto skip_cache;
2442
2443	tos &= IPTOS_RT_MASK;
2444	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2445
2446	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2447	     rth = rcu_dereference(rth->dst.rt_next)) {
2448		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2449		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2450		     (rth->rt_route_iif ^ iif) |
2451		     (rth->rt_key_tos ^ tos)) == 0 &&
2452		    rth->rt_mark == skb->mark &&
2453		    net_eq(dev_net(rth->dst.dev), net) &&
2454		    !rt_is_expired(rth)) {
2455			ipv4_validate_peer(rth);
2456			if (noref) {
2457				dst_use_noref(&rth->dst, jiffies);
2458				skb_dst_set_noref(skb, &rth->dst);
2459			} else {
2460				dst_use(&rth->dst, jiffies);
2461				skb_dst_set(skb, &rth->dst);
2462			}
2463			RT_CACHE_STAT_INC(in_hit);
2464			rcu_read_unlock();
2465			return 0;
2466		}
2467		RT_CACHE_STAT_INC(in_hlist_search);
2468	}
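	/* The key compare in the loop above folds four field checks into a
	 * single branch: each XOR term is zero exactly when its two values
	 * are equal, so the OR of all terms is zero iff dst, src, iif and
	 * tos all match at once.  Schematically:
	 *
	 *	((a ^ a2) | (b ^ b2)) == 0   iff   a == a2 && b == b2
	 */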
2469
2470skip_cache:
2471	/* Multicast recognition logic was moved from the route cache
2472	   to here. The problem was that too many Ethernet cards have
2473	   broken/missing hardware multicast filters :-( As a result, a
2474	   host on a multicast network acquires a lot of useless route
2475	   cache entries, e.g. for SDR messages from all over the world.
2476	   Now we try to get rid of them. Really, provided the software
2477	   IP multicast filter is organized reasonably (at least, hashed),
2478	   it does not result in a slowdown compared with route cache
2479	   reject entries. Note that multicast routers are not affected,
2480	   because a route cache entry is created eventually.
2481	 */
2482	if (ipv4_is_multicast(daddr)) {
2483		struct in_device *in_dev = __in_dev_get_rcu(dev);
2484
2485		if (in_dev) {
2486			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2487						  ip_hdr(skb)->protocol);
2488			if (our
2489#ifdef CONFIG_IP_MROUTE
2490				||
2491			    (!ipv4_is_local_multicast(daddr) &&
2492			     IN_DEV_MFORWARD(in_dev))
2493#endif
2494			   ) {
2495				int res = ip_route_input_mc(skb, daddr, saddr,
2496							    tos, dev, our);
2497				rcu_read_unlock();
2498				return res;
2499			}
2500		}
2501		rcu_read_unlock();
2502		return -EINVAL;
2503	}
2504	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2505	rcu_read_unlock();
2506	return res;
2507}
2508EXPORT_SYMBOL(ip_route_input_common);
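/* Most callers reach this through the ip_route_input() and
 * ip_route_input_noref() wrappers in include/net/route.h, which pass
 * noref = false and true respectively, e.g.:
 *
 *	err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev);
 */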
2509
2510/* called with rcu_read_lock() */
2511static struct rtable *__mkroute_output(const struct fib_result *res,
2512				       const struct flowi4 *fl4,
2513				       __be32 orig_daddr, __be32 orig_saddr,
2514				       int orig_oif, __u8 orig_rtos,
2515				       struct net_device *dev_out,
2516				       unsigned int flags)
2517{
2518	struct fib_info *fi = res->fi;
2519	struct in_device *in_dev;
2520	u16 type = res->type;
2521	struct rtable *rth;
2522
2523	if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2524		return ERR_PTR(-EINVAL);
2525
2526	if (ipv4_is_lbcast(fl4->daddr))
2527		type = RTN_BROADCAST;
2528	else if (ipv4_is_multicast(fl4->daddr))
2529		type = RTN_MULTICAST;
2530	else if (ipv4_is_zeronet(fl4->daddr))
2531		return ERR_PTR(-EINVAL);
2532
2533	if (dev_out->flags & IFF_LOOPBACK)
2534		flags |= RTCF_LOCAL;
2535
2536	in_dev = __in_dev_get_rcu(dev_out);
2537	if (!in_dev)
2538		return ERR_PTR(-EINVAL);
2539
2540	if (type == RTN_BROADCAST) {
2541		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2542		fi = NULL;
2543	} else if (type == RTN_MULTICAST) {
2544		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2545		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2546				     fl4->flowi4_proto))
2547			flags &= ~RTCF_LOCAL;
2548		/* If a multicast route does not exist, use the
2549		 * default one, but do not gateway in this case.
2550		 * Yes, it is a hack.
2551		 */
2552		if (fi && res->prefixlen < 4)
2553			fi = NULL;
2554	}
2555
2556	rth = rt_dst_alloc(dev_out,
2557			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
2558			   IN_DEV_CONF_GET(in_dev, NOXFRM));
2559	if (!rth)
2560		return ERR_PTR(-ENOBUFS);
2561
2562	rth->dst.output = ip_output;
2563
2564	rth->rt_key_dst	= orig_daddr;
2565	rth->rt_key_src	= orig_saddr;
2566	rth->rt_genid = rt_genid(dev_net(dev_out));
2567	rth->rt_flags	= flags;
2568	rth->rt_type	= type;
2569	rth->rt_key_tos	= orig_rtos;
2570	rth->rt_dst	= fl4->daddr;
2571	rth->rt_src	= fl4->saddr;
2572	rth->rt_route_iif = 0;
2573	rth->rt_iif	= orig_oif ? : dev_out->ifindex;
2574	rth->rt_oif	= orig_oif;
2575	rth->rt_mark    = fl4->flowi4_mark;
2576	rth->rt_gateway = fl4->daddr;
2577	rth->rt_spec_dst= fl4->saddr;
2578	rth->rt_peer_genid = 0;
2579	rth->peer = NULL;
2580	rth->fi = NULL;
2581
2582	RT_CACHE_STAT_INC(out_slow_tot);
2583
2584	if (flags & RTCF_LOCAL) {
2585		rth->dst.input = ip_local_deliver;
2586		rth->rt_spec_dst = fl4->daddr;
2587	}
2588	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2589		rth->rt_spec_dst = fl4->saddr;
2590		if (flags & RTCF_LOCAL &&
2591		    !(dev_out->flags & IFF_LOOPBACK)) {
2592			rth->dst.output = ip_mc_output;
2593			RT_CACHE_STAT_INC(out_slow_mc);
2594		}
2595#ifdef CONFIG_IP_MROUTE
2596		if (type == RTN_MULTICAST) {
2597			if (IN_DEV_MFORWARD(in_dev) &&
2598			    !ipv4_is_local_multicast(fl4->daddr)) {
2599				rth->dst.input = ip_mr_input;
2600				rth->dst.output = ip_mc_output;
2601			}
2602		}
2603#endif
2604	}
2605
2606	rt_set_nexthop(rth, fl4, res, fi, type, 0);
2607
2608	return rth;
2609}
2610
2611/*
2612 * Major route resolver routine.
2613 * called with rcu_read_lock();
2614 */
2615
2616static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2617{
2618	struct net_device *dev_out = NULL;
2619	__u8 tos = RT_FL_TOS(fl4);
2620	unsigned int flags = 0;
2621	struct fib_result res;
2622	struct rtable *rth;
2623	__be32 orig_daddr;
2624	__be32 orig_saddr;
2625	int orig_oif;
2626
2627	res.fi		= NULL;
2628#ifdef CONFIG_IP_MULTIPLE_TABLES
2629	res.r		= NULL;
2630#endif
2631
2632	orig_daddr = fl4->daddr;
2633	orig_saddr = fl4->saddr;
2634	orig_oif = fl4->flowi4_oif;
2635
2636	fl4->flowi4_iif = net->loopback_dev->ifindex;
2637	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2638	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2639			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2640
2641	rcu_read_lock();
2642	if (fl4->saddr) {
2643		rth = ERR_PTR(-EINVAL);
2644		if (ipv4_is_multicast(fl4->saddr) ||
2645		    ipv4_is_lbcast(fl4->saddr) ||
2646		    ipv4_is_zeronet(fl4->saddr))
2647			goto out;
2648
2649		/* I removed the check for oif == dev_out->oif here.
2650		   It was wrong for two reasons:
2651		   1. ip_dev_find(net, saddr) can return the wrong iface, if
2652		      saddr is assigned to multiple interfaces.
2653		   2. Moreover, we are allowed to send packets with the saddr
2654		      of another iface. --ANK
2655		 */
2656
2657		if (fl4->flowi4_oif == 0 &&
2658		    (ipv4_is_multicast(fl4->daddr) ||
2659		     ipv4_is_lbcast(fl4->daddr))) {
2660			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2661			dev_out = __ip_dev_find(net, fl4->saddr, false);
2662			if (dev_out == NULL)
2663				goto out;
2664
2665			/* Special hack: the user can direct multicasts
2666			   and limited broadcast via the necessary interface
2667			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2668			   This hack is not just for fun, it allows
2669			   vic, vat and friends to work.
2670			   They bind a socket to loopback, set ttl to zero
2671			   and expect that it will work.
2672			   From the viewpoint of the routing cache they are broken,
2673			   because we are not allowed to build a multicast path
2674			   with a loopback source addr (the routing cache
2675			   cannot know that ttl is zero, so the packet
2676			   will not leave this host and the route is valid).
2677			   Luckily, this hack is a good workaround.
2678			 */
2679
2680			fl4->flowi4_oif = dev_out->ifindex;
2681			goto make_route;
2682		}
2683
2684		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2685			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2686			if (!__ip_dev_find(net, fl4->saddr, false))
2687				goto out;
2688		}
2689	}
2690
2691
2692	if (fl4->flowi4_oif) {
2693		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2694		rth = ERR_PTR(-ENODEV);
2695		if (dev_out == NULL)
2696			goto out;
2697
2698		/* RACE: Check return value of inet_select_addr instead. */
2699		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2700			rth = ERR_PTR(-ENETUNREACH);
2701			goto out;
2702		}
2703		if (ipv4_is_local_multicast(fl4->daddr) ||
2704		    ipv4_is_lbcast(fl4->daddr)) {
2705			if (!fl4->saddr)
2706				fl4->saddr = inet_select_addr(dev_out, 0,
2707							      RT_SCOPE_LINK);
2708			goto make_route;
2709		}
2710		if (fl4->saddr) {
2711			if (ipv4_is_multicast(fl4->daddr))
2712				fl4->saddr = inet_select_addr(dev_out, 0,
2713							      fl4->flowi4_scope);
2714			else if (!fl4->daddr)
2715				fl4->saddr = inet_select_addr(dev_out, 0,
2716							      RT_SCOPE_HOST);
2717		}
2718	}
2719
2720	if (!fl4->daddr) {
2721		fl4->daddr = fl4->saddr;
2722		if (!fl4->daddr)
2723			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2724		dev_out = net->loopback_dev;
2725		fl4->flowi4_oif = net->loopback_dev->ifindex;
2726		res.type = RTN_LOCAL;
2727		flags |= RTCF_LOCAL;
2728		goto make_route;
2729	}
2730
2731	if (fib_lookup(net, fl4, &res)) {
2732		res.fi = NULL;
2733		if (fl4->flowi4_oif) {
2734			/* Apparently, the routing tables are wrong. Assume
2735			   that the destination is on-link.
2736
2737			   WHY? DW.
2738			   Because we are allowed to send to an iface
2739			   even if it has NO routes and NO assigned
2740			   addresses. When oif is specified, the routing
2741			   tables are looked up with only one purpose:
2742			   to catch whether the destination is gatewayed,
2743			   rather than direct. Moreover, if MSG_DONTROUTE is set,
2744			   we send the packet, ignoring both the routing tables
2745			   and the ifaddr state. --ANK
2746
2747
2748			   We could do this even when oif is unknown,
2749			   as IPv6 likely does, but we do not.
2750			 */
2751
2752			if (fl4->saddr == 0)
2753				fl4->saddr = inet_select_addr(dev_out, 0,
2754							      RT_SCOPE_LINK);
2755			res.type = RTN_UNICAST;
2756			goto make_route;
2757		}
2758		rth = ERR_PTR(-ENETUNREACH);
2759		goto out;
2760	}
2761
2762	if (res.type == RTN_LOCAL) {
2763		if (!fl4->saddr) {
2764			if (res.fi->fib_prefsrc)
2765				fl4->saddr = res.fi->fib_prefsrc;
2766			else
2767				fl4->saddr = fl4->daddr;
2768		}
2769		dev_out = net->loopback_dev;
2770		fl4->flowi4_oif = dev_out->ifindex;
2771		res.fi = NULL;
2772		flags |= RTCF_LOCAL;
2773		goto make_route;
2774	}
2775
2776#ifdef CONFIG_IP_ROUTE_MULTIPATH
2777	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2778		fib_select_multipath(&res);
2779	else
2780#endif
2781	if (!res.prefixlen &&
2782	    res.table->tb_num_default > 1 &&
2783	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
2784		fib_select_default(&res);
2785
2786	if (!fl4->saddr)
2787		fl4->saddr = FIB_RES_PREFSRC(net, res);
2788
2789	dev_out = FIB_RES_DEV(res);
2790	fl4->flowi4_oif = dev_out->ifindex;
2791
2792
2793make_route:
2794	rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2795			       tos, dev_out, flags);
2796	if (!IS_ERR(rth)) {
2797		unsigned int hash;
2798
2799		hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2800			       rt_genid(dev_net(dev_out)));
2801		rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2802	}
2803
2804out:
2805	rcu_read_unlock();
2806	return rth;
2807}
2808
2809struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2810{
2811	struct rtable *rth;
2812	unsigned int hash;
2813
2814	if (!rt_caching(net))
2815		goto slow_output;
2816
2817	hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2818
2819	rcu_read_lock_bh();
2820	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2821		rth = rcu_dereference_bh(rth->dst.rt_next)) {
2822		if (rth->rt_key_dst == flp4->daddr &&
2823		    rth->rt_key_src == flp4->saddr &&
2824		    rt_is_output_route(rth) &&
2825		    rth->rt_oif == flp4->flowi4_oif &&
2826		    rth->rt_mark == flp4->flowi4_mark &&
2827		    !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2828			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
2829		    net_eq(dev_net(rth->dst.dev), net) &&
2830		    !rt_is_expired(rth)) {
2831			ipv4_validate_peer(rth);
2832			dst_use(&rth->dst, jiffies);
2833			RT_CACHE_STAT_INC(out_hit);
2834			rcu_read_unlock_bh();
2835			if (!flp4->saddr)
2836				flp4->saddr = rth->rt_src;
2837			if (!flp4->daddr)
2838				flp4->daddr = rth->rt_dst;
2839			return rth;
2840		}
2841		RT_CACHE_STAT_INC(out_hlist_search);
2842	}
2843	rcu_read_unlock_bh();
2844
2845slow_output:
2846	return ip_route_output_slow(net, flp4);
2847}
2848EXPORT_SYMBOL_GPL(__ip_route_output_key);
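/* A minimal sketch of a typical output lookup through this path, with
 * error handling elided; ip_route_output_key() is the thin wrapper most
 * callers use:
 *
 *	struct flowi4 fl4 = {
 *		.daddr      = daddr,
 *		.flowi4_oif = oif,
 *		.flowi4_tos = RT_TOS(tos),
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *	if (!IS_ERR(rt)) {
 *		... use rt->dst ...
 *		ip_rt_put(rt);
 *	}
 */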
2849
2850static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2851{
2852	return NULL;
2853}
2854
2855static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2856{
2857	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2858
2859	return mtu ? : dst->dev->mtu;
2860}
2861
2862static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2863{
2864}
2865
2866static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2867					  unsigned long old)
2868{
2869	return NULL;
2870}
2871
2872static struct dst_ops ipv4_dst_blackhole_ops = {
2873	.family			=	AF_INET,
2874	.protocol		=	cpu_to_be16(ETH_P_IP),
2875	.destroy		=	ipv4_dst_destroy,
2876	.check			=	ipv4_blackhole_dst_check,
2877	.mtu			=	ipv4_blackhole_mtu,
2878	.default_advmss		=	ipv4_default_advmss,
2879	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
2880	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
2881	.neigh_lookup		=	ipv4_neigh_lookup,
2882};
2883
2884struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2885{
2886	struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2887	struct rtable *ort = (struct rtable *) dst_orig;
2888
2889	if (rt) {
2890		struct dst_entry *new = &rt->dst;
2891
2892		new->__use = 1;
2893		new->input = dst_discard;
2894		new->output = dst_discard;
2895		dst_copy_metrics(new, &ort->dst);
2896
2897		new->dev = ort->dst.dev;
2898		if (new->dev)
2899			dev_hold(new->dev);
2900
2901		rt->rt_key_dst = ort->rt_key_dst;
2902		rt->rt_key_src = ort->rt_key_src;
2903		rt->rt_key_tos = ort->rt_key_tos;
2904		rt->rt_route_iif = ort->rt_route_iif;
2905		rt->rt_iif = ort->rt_iif;
2906		rt->rt_oif = ort->rt_oif;
2907		rt->rt_mark = ort->rt_mark;
2908
2909		rt->rt_genid = rt_genid(net);
2910		rt->rt_flags = ort->rt_flags;
2911		rt->rt_type = ort->rt_type;
2912		rt->rt_dst = ort->rt_dst;
2913		rt->rt_src = ort->rt_src;
2914		rt->rt_gateway = ort->rt_gateway;
2915		rt->rt_spec_dst = ort->rt_spec_dst;
2916		rt->peer = ort->peer;
2917		if (rt->peer)
2918			atomic_inc(&rt->peer->refcnt);
2919		rt->fi = ort->fi;
2920		if (rt->fi)
2921			atomic_inc(&rt->fi->fib_clntref);
2922
2923		dst_free(new);
2924	}
2925
2926	dst_release(dst_orig);
2927
2928	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2929}
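/* A blackhole entry preserves a usable dst while silently discarding all
 * traffic through it; the xfrm (IPsec) code uses this, for instance,
 * while security associations are still resolving.  Note that the copy
 * deliberately shares the peer and fib_info of the original, taking an
 * extra reference on each.
 */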
2930
2931struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2932				    struct sock *sk)
2933{
2934	struct rtable *rt = __ip_route_output_key(net, flp4);
2935
2936	if (IS_ERR(rt))
2937		return rt;
2938
2939	if (flp4->flowi4_proto)
2940		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2941						   flowi4_to_flowi(flp4),
2942						   sk, 0);
2943
2944	return rt;
2945}
2946EXPORT_SYMBOL_GPL(ip_route_output_flow);
2947
2948static int rt_fill_info(struct net *net,
2949			struct sk_buff *skb, u32 pid, u32 seq, int event,
2950			int nowait, unsigned int flags)
2951{
2952	struct rtable *rt = skb_rtable(skb);
2953	struct rtmsg *r;
2954	struct nlmsghdr *nlh;
2955	unsigned long expires = 0;
2956	const struct inet_peer *peer = rt->peer;
2957	u32 id = 0, ts = 0, tsage = 0, error;
2958
2959	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2960	if (nlh == NULL)
2961		return -EMSGSIZE;
2962
2963	r = nlmsg_data(nlh);
2964	r->rtm_family	 = AF_INET;
2965	r->rtm_dst_len	= 32;
2966	r->rtm_src_len	= 0;
2967	r->rtm_tos	= rt->rt_key_tos;
2968	r->rtm_table	= RT_TABLE_MAIN;
2969	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2970		goto nla_put_failure;
2971	r->rtm_type	= rt->rt_type;
2972	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2973	r->rtm_protocol = RTPROT_UNSPEC;
2974	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2975	if (rt->rt_flags & RTCF_NOTIFY)
2976		r->rtm_flags |= RTM_F_NOTIFY;
2977
2978	if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
2979		goto nla_put_failure;
2980	if (rt->rt_key_src) {
2981		r->rtm_src_len = 32;
2982		if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
2983			goto nla_put_failure;
2984	}
2985	if (rt->dst.dev &&
2986	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2987		goto nla_put_failure;
2988#ifdef CONFIG_IP_ROUTE_CLASSID
2989	if (rt->dst.tclassid &&
2990	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2991		goto nla_put_failure;
2992#endif
2993	if (rt_is_input_route(rt)) {
2994		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
2995			goto nla_put_failure;
2996	} else if (rt->rt_src != rt->rt_key_src) {
2997		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
2998			goto nla_put_failure;
2999	}
3000	if (rt->rt_dst != rt->rt_gateway &&
3001	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
3002		goto nla_put_failure;
3003
3004	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
3005		goto nla_put_failure;
3006
3007	if (rt->rt_mark &&
3008	    nla_put_be32(skb, RTA_MARK, rt->rt_mark))
3009		goto nla_put_failure;
3010
3011	error = rt->dst.error;
3012	if (peer) {
3013		inet_peer_refcheck(rt->peer);
3014		id = atomic_read(&peer->ip_id_count) & 0xffff;
3015		if (peer->tcp_ts_stamp) {
3016			ts = peer->tcp_ts;
3017			tsage = get_seconds() - peer->tcp_ts_stamp;
3018		}
3019		expires = ACCESS_ONCE(peer->pmtu_expires);
3020		if (expires) {
3021			if (time_before(jiffies, expires))
3022				expires -= jiffies;
3023			else
3024				expires = 0;
3025		}
3026	}
3027
3028	if (rt_is_input_route(rt)) {
3029#ifdef CONFIG_IP_MROUTE
3030		__be32 dst = rt->rt_dst;
3031
3032		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
3033		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3034			int err = ipmr_get_route(net, skb,
3035						 rt->rt_src, rt->rt_dst,
3036						 r, nowait);
3037			if (err <= 0) {
3038				if (!nowait) {
3039					if (err == 0)
3040						return 0;
3041					goto nla_put_failure;
3042				} else {
3043					if (err == -EMSGSIZE)
3044						goto nla_put_failure;
3045					error = err;
3046				}
3047			}
3048		} else
3049#endif
3050			if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
3051				goto nla_put_failure;
3052	}
3053
3054	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
3055			       expires, error) < 0)
3056		goto nla_put_failure;
3057
3058	return nlmsg_end(skb, nlh);
3059
3060nla_put_failure:
3061	nlmsg_cancel(skb, nlh);
3062	return -EMSGSIZE;
3063}
3064
3065static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
3066{
3067	struct net *net = sock_net(in_skb->sk);
3068	struct rtmsg *rtm;
3069	struct nlattr *tb[RTA_MAX+1];
3070	struct rtable *rt = NULL;
3071	__be32 dst = 0;
3072	__be32 src = 0;
3073	u32 iif;
3074	int err;
3075	int mark;
3076	struct sk_buff *skb;
3077
3078	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
3079	if (err < 0)
3080		goto errout;
3081
3082	rtm = nlmsg_data(nlh);
3083
3084	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3085	if (skb == NULL) {
3086		err = -ENOBUFS;
3087		goto errout;
3088	}
3089
3090	/* Reserve room for dummy headers; this skb can pass
3091	   through a good chunk of the routing engine.
3092	 */
3093	skb_reset_mac_header(skb);
3094	skb_reset_network_header(skb);
3095
3096	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
3097	ip_hdr(skb)->protocol = IPPROTO_ICMP;
3098	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
3099
3100	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
3101	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
3102	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3103	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3104
3105	if (iif) {
3106		struct net_device *dev;
3107
3108		dev = __dev_get_by_index(net, iif);
3109		if (dev == NULL) {
3110			err = -ENODEV;
3111			goto errout_free;
3112		}
3113
3114		skb->protocol	= htons(ETH_P_IP);
3115		skb->dev	= dev;
3116		skb->mark	= mark;
3117		local_bh_disable();
3118		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
3119		local_bh_enable();
3120
3121		rt = skb_rtable(skb);
3122		if (err == 0 && rt->dst.error)
3123			err = -rt->dst.error;
3124	} else {
3125		struct flowi4 fl4 = {
3126			.daddr = dst,
3127			.saddr = src,
3128			.flowi4_tos = rtm->rtm_tos,
3129			.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
3130			.flowi4_mark = mark,
3131		};
3132		rt = ip_route_output_key(net, &fl4);
3133
3134		err = 0;
3135		if (IS_ERR(rt))
3136			err = PTR_ERR(rt);
3137	}
3138
3139	if (err)
3140		goto errout_free;
3141
3142	skb_dst_set(skb, &rt->dst);
3143	if (rtm->rtm_flags & RTM_F_NOTIFY)
3144		rt->rt_flags |= RTCF_NOTIFY;
3145
3146	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
3147			   RTM_NEWROUTE, 0, 0);
3148	if (err <= 0)
3149		goto errout_free;
3150
3151	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
3152errout:
3153	return err;
3154
3155errout_free:
3156	kfree_skb(skb);
3157	goto errout;
3158}
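/* This handler backs RTM_GETROUTE requests; in user space, for example,
 * "ip route get 8.8.8.8" issues such a request and prints the reply
 * built by rt_fill_info() above.
 */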
3159
3160int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
3161{
3162	struct rtable *rt;
3163	int h, s_h;
3164	int idx, s_idx;
3165	struct net *net;
3166
3167	net = sock_net(skb->sk);
3168
3169	s_h = cb->args[0];
3170	if (s_h < 0)
3171		s_h = 0;
3172	s_idx = idx = cb->args[1];
3173	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3174		if (!rt_hash_table[h].chain)
3175			continue;
3176		rcu_read_lock_bh();
3177		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3178		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3179			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
3180				continue;
3181			if (rt_is_expired(rt))
3182				continue;
3183			skb_dst_set_noref(skb, &rt->dst);
3184			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3185					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3186					 1, NLM_F_MULTI) <= 0) {
3187				skb_dst_drop(skb);
3188				rcu_read_unlock_bh();
3189				goto done;
3190			}
3191			skb_dst_drop(skb);
3192		}
3193		rcu_read_unlock_bh();
3194	}
3195
3196done:
3197	cb->args[0] = h;
3198	cb->args[1] = idx;
3199	return skb->len;
3200}
3201
3202void ip_rt_multicast_event(struct in_device *in_dev)
3203{
3204	rt_cache_flush(dev_net(in_dev->dev), 0);
3205}
3206
3207#ifdef CONFIG_SYSCTL
3208static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3209					void __user *buffer,
3210					size_t *lenp, loff_t *ppos)
3211{
3212	if (write) {
3213		int flush_delay;
3214		ctl_table ctl;
3215		struct net *net;
3216
3217		memcpy(&ctl, __ctl, sizeof(ctl));
3218		ctl.data = &flush_delay;
3219		proc_dointvec(&ctl, write, buffer, lenp, ppos);
3220
3221		net = (struct net *)__ctl->extra1;
3222		rt_cache_flush(net, flush_delay);
3223		return 0;
3224	}
3225
3226	return -EINVAL;
3227}
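/* Writing to /proc/sys/net/ipv4/route/flush invokes this handler, with
 * the written value parsed as the flush delay; e.g.
 *
 *	echo -1 > /proc/sys/net/ipv4/route/flush
 *
 * is the usual way to request an immediate flush of the route cache.
 */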
3228
3229static ctl_table ipv4_route_table[] = {
3230	{
3231		.procname	= "gc_thresh",
3232		.data		= &ipv4_dst_ops.gc_thresh,
3233		.maxlen		= sizeof(int),
3234		.mode		= 0644,
3235		.proc_handler	= proc_dointvec,
3236	},
3237	{
3238		.procname	= "max_size",
3239		.data		= &ip_rt_max_size,
3240		.maxlen		= sizeof(int),
3241		.mode		= 0644,
3242		.proc_handler	= proc_dointvec,
3243	},
3244	{
3245		/*  Deprecated. Use gc_min_interval_ms */
3246
3247		.procname	= "gc_min_interval",
3248		.data		= &ip_rt_gc_min_interval,
3249		.maxlen		= sizeof(int),
3250		.mode		= 0644,
3251		.proc_handler	= proc_dointvec_jiffies,
3252	},
3253	{
3254		.procname	= "gc_min_interval_ms",
3255		.data		= &ip_rt_gc_min_interval,
3256		.maxlen		= sizeof(int),
3257		.mode		= 0644,
3258		.proc_handler	= proc_dointvec_ms_jiffies,
3259	},
3260	{
3261		.procname	= "gc_timeout",
3262		.data		= &ip_rt_gc_timeout,
3263		.maxlen		= sizeof(int),
3264		.mode		= 0644,
3265		.proc_handler	= proc_dointvec_jiffies,
3266	},
3267	{
3268		.procname	= "gc_interval",
3269		.data		= &ip_rt_gc_interval,
3270		.maxlen		= sizeof(int),
3271		.mode		= 0644,
3272		.proc_handler	= proc_dointvec_jiffies,
3273	},
3274	{
3275		.procname	= "redirect_load",
3276		.data		= &ip_rt_redirect_load,
3277		.maxlen		= sizeof(int),
3278		.mode		= 0644,
3279		.proc_handler	= proc_dointvec,
3280	},
3281	{
3282		.procname	= "redirect_number",
3283		.data		= &ip_rt_redirect_number,
3284		.maxlen		= sizeof(int),
3285		.mode		= 0644,
3286		.proc_handler	= proc_dointvec,
3287	},
3288	{
3289		.procname	= "redirect_silence",
3290		.data		= &ip_rt_redirect_silence,
3291		.maxlen		= sizeof(int),
3292		.mode		= 0644,
3293		.proc_handler	= proc_dointvec,
3294	},
3295	{
3296		.procname	= "error_cost",
3297		.data		= &ip_rt_error_cost,
3298		.maxlen		= sizeof(int),
3299		.mode		= 0644,
3300		.proc_handler	= proc_dointvec,
3301	},
3302	{
3303		.procname	= "error_burst",
3304		.data		= &ip_rt_error_burst,
3305		.maxlen		= sizeof(int),
3306		.mode		= 0644,
3307		.proc_handler	= proc_dointvec,
3308	},
3309	{
3310		.procname	= "gc_elasticity",
3311		.data		= &ip_rt_gc_elasticity,
3312		.maxlen		= sizeof(int),
3313		.mode		= 0644,
3314		.proc_handler	= proc_dointvec,
3315	},
3316	{
3317		.procname	= "mtu_expires",
3318		.data		= &ip_rt_mtu_expires,
3319		.maxlen		= sizeof(int),
3320		.mode		= 0644,
3321		.proc_handler	= proc_dointvec_jiffies,
3322	},
3323	{
3324		.procname	= "min_pmtu",
3325		.data		= &ip_rt_min_pmtu,
3326		.maxlen		= sizeof(int),
3327		.mode		= 0644,
3328		.proc_handler	= proc_dointvec,
3329	},
3330	{
3331		.procname	= "min_adv_mss",
3332		.data		= &ip_rt_min_advmss,
3333		.maxlen		= sizeof(int),
3334		.mode		= 0644,
3335		.proc_handler	= proc_dointvec,
3336	},
3337	{ }
3338};
3339
3340static struct ctl_table ipv4_route_flush_table[] = {
3341	{
3342		.procname	= "flush",
3343		.maxlen		= sizeof(int),
3344		.mode		= 0200,
3345		.proc_handler	= ipv4_sysctl_rtcache_flush,
3346	},
3347	{ },
3348};
3349
3350static __net_init int sysctl_route_net_init(struct net *net)
3351{
3352	struct ctl_table *tbl;
3353
3354	tbl = ipv4_route_flush_table;
3355	if (!net_eq(net, &init_net)) {
3356		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3357		if (tbl == NULL)
3358			goto err_dup;
3359	}
3360	tbl[0].extra1 = net;
3361
3362	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3363	if (net->ipv4.route_hdr == NULL)
3364		goto err_reg;
3365	return 0;
3366
3367err_reg:
3368	if (tbl != ipv4_route_flush_table)
3369		kfree(tbl);
3370err_dup:
3371	return -ENOMEM;
3372}
3373
3374static __net_exit void sysctl_route_net_exit(struct net *net)
3375{
3376	struct ctl_table *tbl;
3377
3378	tbl = net->ipv4.route_hdr->ctl_table_arg;
3379	unregister_net_sysctl_table(net->ipv4.route_hdr);
3380	BUG_ON(tbl == ipv4_route_flush_table);
3381	kfree(tbl);
3382}
3383
3384static __net_initdata struct pernet_operations sysctl_route_ops = {
3385	.init = sysctl_route_net_init,
3386	.exit = sysctl_route_net_exit,
3387};
3388#endif
3389
3390static __net_init int rt_genid_init(struct net *net)
3391{
3392	get_random_bytes(&net->ipv4.rt_genid,
3393			 sizeof(net->ipv4.rt_genid));
3394	get_random_bytes(&net->ipv4.dev_addr_genid,
3395			 sizeof(net->ipv4.dev_addr_genid));
3396	return 0;
3397}
3398
3399static __net_initdata struct pernet_operations rt_genid_ops = {
3400	.init = rt_genid_init,
3401};
3402
3403
3404#ifdef CONFIG_IP_ROUTE_CLASSID
3405struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3406#endif /* CONFIG_IP_ROUTE_CLASSID */
3407
3408static __initdata unsigned long rhash_entries;
3409static int __init set_rhash_entries(char *str)
3410{
3411	ssize_t ret;
3412
3413	if (!str)
3414		return 0;
3415
3416	ret = kstrtoul(str, 0, &rhash_entries);
3417	if (ret)
3418		return 0;
3419
3420	return 1;
3421}
3422__setup("rhash_entries=", set_rhash_entries);
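/* Boot-time override for the route cache hash size: booting with, e.g.,
 * "rhash_entries=262144" on the kernel command line requests that many
 * hash slots instead of the memory-scaled default computed in
 * ip_rt_init() below.
 */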
3423
3424int __init ip_rt_init(void)
3425{
3426	int rc = 0;
3427
3428#ifdef CONFIG_IP_ROUTE_CLASSID
3429	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3430	if (!ip_rt_acct)
3431		panic("IP: failed to allocate ip_rt_acct\n");
3432#endif
3433
3434	ipv4_dst_ops.kmem_cachep =
3435		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3436				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3437
3438	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3439
3440	if (dst_entries_init(&ipv4_dst_ops) < 0)
3441		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3442
3443	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3444		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3445
3446	rt_hash_table = (struct rt_hash_bucket *)
3447		alloc_large_system_hash("IP route cache",
3448					sizeof(struct rt_hash_bucket),
3449					rhash_entries,
3450					(totalram_pages >= 128 * 1024) ?
3451					15 : 17,
3452					0,
3453					&rt_hash_log,
3454					&rt_hash_mask,
3455					0,
3456					rhash_entries ? 0 : 512 * 1024);
3457	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3458	rt_hash_lock_init();
3459
3460	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3461	ip_rt_max_size = (rt_hash_mask + 1) * 16;
3462
3463	devinet_init();
3464	ip_fib_init();
3465
3466	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3467	expires_ljiffies = jiffies;
3468	schedule_delayed_work(&expires_work,
3469		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3470
3471	if (ip_rt_proc_init())
3472		pr_err("Unable to create route proc files\n");
3473#ifdef CONFIG_XFRM
3474	xfrm_init();
3475	xfrm4_init(ip_rt_max_size);
3476#endif
3477	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
3478
3479#ifdef CONFIG_SYSCTL
3480	register_pernet_subsys(&sysctl_route_ops);
3481#endif
3482	register_pernet_subsys(&rt_genid_ops);
3483	return rc;
3484}
3485
3486#ifdef CONFIG_SYSCTL
3487/*
3488 * We really need to sanitize the damn ipv4 init order, then all
3489 * this nonsense will go away.
3490 */
3491void __init ip_static_sysctl_init(void)
3492{
3493	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3494}
3495#endif
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		ROUTE - implementation of the IP router.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
  13 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  14 *
  15 * Fixes:
  16 *		Alan Cox	:	Verify area fixes.
  17 *		Alan Cox	:	cli() protects routing changes
  18 *		Rui Oliveira	:	ICMP routing table updates
  19 *		(rco@di.uminho.pt)	Routing table insertion and update
  20 *		Linus Torvalds	:	Rewrote bits to be sensible
  21 *		Alan Cox	:	Added BSD route gw semantics
  22 *		Alan Cox	:	Super /proc >4K
  23 *		Alan Cox	:	MTU in route table
  24 *		Alan Cox	:	MSS actually. Also added the window
  25 *					clamper.
  26 *		Sam Lantinga	:	Fixed route matching in rt_del()
  27 *		Alan Cox	:	Routing cache support.
  28 *		Alan Cox	:	Removed compatibility cruft.
  29 *		Alan Cox	:	RTF_REJECT support.
  30 *		Alan Cox	:	TCP irtt support.
  31 *		Jonathan Naylor	:	Added Metric support.
  32 *	Miquel van Smoorenburg	:	BSD API fixes.
  33 *	Miquel van Smoorenburg	:	Metrics.
  34 *		Alan Cox	:	Use __u32 properly
  35 *		Alan Cox	:	Aligned routing errors more closely with BSD
  36 *					our system is still very different.
  37 *		Alan Cox	:	Faster /proc handling
  38 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
  39 *					routing caches and better behaviour.
  40 *
  41 *		Olaf Erb	:	irtt wasn't being copied right.
  42 *		Bjorn Ekwall	:	Kerneld route support.
  43 *		Alan Cox	:	Multicast fixed (I hope)
  44 *		Pavel Krauz	:	Limited broadcast fixed
  45 *		Mike McLagan	:	Routing by source
  46 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
  47 *					route.c and rewritten from scratch.
  48 *		Andi Kleen	:	Load-limit warning messages.
  49 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  50 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
  51 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
  52 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
  53 *		Marc Boucher	:	routing by fwmark
  54 *	Robert Olsson		:	Added rt_cache statistics
  55 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
  56 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
  57 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
  58 *	Ilia Sotnikov		:	Removed TOS from hash calculations
  59 */
  60
  61#define pr_fmt(fmt) "IPv4: " fmt
  62
  63#include <linux/module.h>
  64#include <linux/bitops.h>
  65#include <linux/kernel.h>
  66#include <linux/mm.h>
  67#include <linux/memblock.h>
  68#include <linux/socket.h>
  69#include <linux/errno.h>
  70#include <linux/in.h>
  71#include <linux/inet.h>
  72#include <linux/netdevice.h>
  73#include <linux/proc_fs.h>
  74#include <linux/init.h>
  75#include <linux/skbuff.h>
  76#include <linux/inetdevice.h>
  77#include <linux/igmp.h>
  78#include <linux/pkt_sched.h>
  79#include <linux/mroute.h>
  80#include <linux/netfilter_ipv4.h>
  81#include <linux/random.h>
  82#include <linux/rcupdate.h>
  83#include <linux/slab.h>
  84#include <linux/jhash.h>
  85#include <net/dst.h>
  86#include <net/dst_metadata.h>
  87#include <net/inet_dscp.h>
  88#include <net/net_namespace.h>
  89#include <net/ip.h>
  90#include <net/route.h>
  91#include <net/inetpeer.h>
  92#include <net/sock.h>
  93#include <net/ip_fib.h>
  94#include <net/nexthop.h>
  95#include <net/tcp.h>
  96#include <net/icmp.h>
  97#include <net/xfrm.h>
  98#include <net/lwtunnel.h>
  99#include <net/netevent.h>
 100#include <net/rtnetlink.h>
 101#ifdef CONFIG_SYSCTL
 102#include <linux/sysctl.h>
 103#endif
 104#include <net/secure_seq.h>
 105#include <net/ip_tunnels.h>
 106
 107#include "fib_lookup.h"
 108
 109#define RT_FL_TOS(oldflp4) \
 110	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 111
 112#define RT_GC_TIMEOUT (300*HZ)
 113
 114#define DEFAULT_MIN_PMTU (512 + 20 + 20)
 115#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
 116#define DEFAULT_MIN_ADVMSS 256
 117static int ip_rt_max_size;
 118static int ip_rt_redirect_number __read_mostly	= 9;
 119static int ip_rt_redirect_load __read_mostly	= HZ / 50;
 120static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
 121static int ip_rt_error_cost __read_mostly	= HZ;
 122static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 123
 124static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
 125
 126/*
 127 *	Interface to generic destination cache.
 128 */
 129
 130INDIRECT_CALLABLE_SCOPE
 131struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 132static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
 133INDIRECT_CALLABLE_SCOPE
 134unsigned int		ipv4_mtu(const struct dst_entry *dst);
 135static void		ipv4_negative_advice(struct sock *sk,
 136					     struct dst_entry *dst);
 137static void		 ipv4_link_failure(struct sk_buff *skb);
 138static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 139					   struct sk_buff *skb, u32 mtu,
 140					   bool confirm_neigh);
 141static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
 142					struct sk_buff *skb);
 143static void		ipv4_dst_destroy(struct dst_entry *dst);
 144
 145static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 146{
 147	WARN_ON(1);
 148	return NULL;
 149}
 150
 151static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 152					   struct sk_buff *skb,
 153					   const void *daddr);
 154static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
 155
 156static struct dst_ops ipv4_dst_ops = {
 157	.family =		AF_INET,
 158	.check =		ipv4_dst_check,
 159	.default_advmss =	ipv4_default_advmss,
 160	.mtu =			ipv4_mtu,
 161	.cow_metrics =		ipv4_cow_metrics,
 162	.destroy =		ipv4_dst_destroy,
 163	.negative_advice =	ipv4_negative_advice,
 164	.link_failure =		ipv4_link_failure,
 165	.update_pmtu =		ip_rt_update_pmtu,
 166	.redirect =		ip_do_redirect,
 167	.local_out =		__ip_local_out,
 168	.neigh_lookup =		ipv4_neigh_lookup,
 169	.confirm_neigh =	ipv4_confirm_neigh,
 170};
 171
 172#define ECN_OR_COST(class)	TC_PRIO_##class
 173
 174const __u8 ip_tos2prio[16] = {
 175	TC_PRIO_BESTEFFORT,
 176	ECN_OR_COST(BESTEFFORT),
 177	TC_PRIO_BESTEFFORT,
 178	ECN_OR_COST(BESTEFFORT),
 179	TC_PRIO_BULK,
 180	ECN_OR_COST(BULK),
 181	TC_PRIO_BULK,
 182	ECN_OR_COST(BULK),
 183	TC_PRIO_INTERACTIVE,
 184	ECN_OR_COST(INTERACTIVE),
 185	TC_PRIO_INTERACTIVE,
 186	ECN_OR_COST(INTERACTIVE),
 187	TC_PRIO_INTERACTIVE_BULK,
 188	ECN_OR_COST(INTERACTIVE_BULK),
 189	TC_PRIO_INTERACTIVE_BULK,
 190	ECN_OR_COST(INTERACTIVE_BULK)
 191};
 192EXPORT_SYMBOL(ip_tos2prio);
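/* The table is indexed by the four TOS bits shifted right once, i.e.
 * rt_tos2priority(tos) returns ip_tos2prio[IPTOS_TOS(tos) >> 1]; for
 * example, TOS 0x10 (low delay) yields index 8, TC_PRIO_INTERACTIVE.
 */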
 193
 194static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 195#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
 196
 197#ifdef CONFIG_PROC_FS
 198static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 199{
 200	if (*pos)
 201		return NULL;
 202	return SEQ_START_TOKEN;
 203}
 204
 205static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 206{
 207	++*pos;
 208	return NULL;
 209}
 210
 211static void rt_cache_seq_stop(struct seq_file *seq, void *v)
 212{
 213}
 214
 215static int rt_cache_seq_show(struct seq_file *seq, void *v)
 216{
 217	if (v == SEQ_START_TOKEN)
 218		seq_printf(seq, "%-127s\n",
 219			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
 220			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
 221			   "HHUptod\tSpecDst");
 222	return 0;
 223}
 224
 225static const struct seq_operations rt_cache_seq_ops = {
 226	.start  = rt_cache_seq_start,
 227	.next   = rt_cache_seq_next,
 228	.stop   = rt_cache_seq_stop,
 229	.show   = rt_cache_seq_show,
 230};
 231
 232static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 233{
 234	int cpu;
 235
 236	if (*pos == 0)
 237		return SEQ_START_TOKEN;
 238
 239	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 240		if (!cpu_possible(cpu))
 241			continue;
 242		*pos = cpu+1;
 243		return &per_cpu(rt_cache_stat, cpu);
 244	}
 245	return NULL;
 246}
 247
 248static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 249{
 250	int cpu;
 251
 252	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 253		if (!cpu_possible(cpu))
 254			continue;
 255		*pos = cpu+1;
 256		return &per_cpu(rt_cache_stat, cpu);
 257	}
 258	(*pos)++;
 259	return NULL;
 260
 261}
 262
 263static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
 264{
 265
 266}
 267
 268static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 269{
 270	struct rt_cache_stat *st = v;
 271
 272	if (v == SEQ_START_TOKEN) {
 273		seq_puts(seq, "entries  in_hit   in_slow_tot in_slow_mc in_no_route in_brd   in_martian_dst in_martian_src out_hit  out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 274		return 0;
 275	}
 276
 277	seq_printf(seq, "%08x %08x %08x    %08x   %08x    %08x %08x       "
 278			"%08x       %08x %08x     %08x    %08x %08x   "
 279			"%08x     %08x        %08x        %08x\n",
 280		   dst_entries_get_slow(&ipv4_dst_ops),
 281		   0, /* st->in_hit */
 282		   st->in_slow_tot,
 283		   st->in_slow_mc,
 284		   st->in_no_route,
 285		   st->in_brd,
 286		   st->in_martian_dst,
 287		   st->in_martian_src,
 288
 289		   0, /* st->out_hit */
 290		   st->out_slow_tot,
 291		   st->out_slow_mc,
 292
 293		   0, /* st->gc_total */
 294		   0, /* st->gc_ignored */
 295		   0, /* st->gc_goal_miss */
 296		   0, /* st->gc_dst_overflow */
 297		   0, /* st->in_hlist_search */
 298		   0  /* st->out_hlist_search */
 299		);
 300	return 0;
 301}
 302
 303static const struct seq_operations rt_cpu_seq_ops = {
 304	.start  = rt_cpu_seq_start,
 305	.next   = rt_cpu_seq_next,
 306	.stop   = rt_cpu_seq_stop,
 307	.show   = rt_cpu_seq_show,
 308};
 309
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 310#ifdef CONFIG_IP_ROUTE_CLASSID
 311static int rt_acct_proc_show(struct seq_file *m, void *v)
 312{
 313	struct ip_rt_acct *dst, *src;
 314	unsigned int i, j;
 315
 316	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
 317	if (!dst)
 318		return -ENOMEM;
 319
 320	for_each_possible_cpu(i) {
 321		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
 322		for (j = 0; j < 256; j++) {
 323			dst[j].o_bytes   += src[j].o_bytes;
 324			dst[j].o_packets += src[j].o_packets;
 325			dst[j].i_bytes   += src[j].i_bytes;
 326			dst[j].i_packets += src[j].i_packets;
 327		}
 328	}
 329
 330	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
 331	kfree(dst);
 332	return 0;
 333}
 
 
 
 
 
 
 
 
 
 
 
 
 
 334#endif
 335
 336static int __net_init ip_rt_do_proc_init(struct net *net)
 337{
 338	struct proc_dir_entry *pde;
 339
 340	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
 341			      &rt_cache_seq_ops);
 342	if (!pde)
 343		goto err1;
 344
 345	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
 346			      &rt_cpu_seq_ops);
 347	if (!pde)
 348		goto err2;
 349
 350#ifdef CONFIG_IP_ROUTE_CLASSID
 351	pde = proc_create_single("rt_acct", 0, net->proc_net,
 352			rt_acct_proc_show);
 353	if (!pde)
 354		goto err3;
 355#endif
 356	return 0;
 357
 358#ifdef CONFIG_IP_ROUTE_CLASSID
 359err3:
 360	remove_proc_entry("rt_cache", net->proc_net_stat);
 361#endif
 362err2:
 363	remove_proc_entry("rt_cache", net->proc_net);
 364err1:
 365	return -ENOMEM;
 366}
 367
 368static void __net_exit ip_rt_do_proc_exit(struct net *net)
 369{
 370	remove_proc_entry("rt_cache", net->proc_net_stat);
 371	remove_proc_entry("rt_cache", net->proc_net);
 372#ifdef CONFIG_IP_ROUTE_CLASSID
 373	remove_proc_entry("rt_acct", net->proc_net);
 374#endif
 375}
 376
 377static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
 378	.init = ip_rt_do_proc_init,
 379	.exit = ip_rt_do_proc_exit,
 380};
 381
 382static int __init ip_rt_proc_init(void)
 383{
 384	return register_pernet_subsys(&ip_rt_proc_ops);
 385}
 386
 387#else
 388static inline int ip_rt_proc_init(void)
 389{
 390	return 0;
 391}
 392#endif /* CONFIG_PROC_FS */
 393
 394static inline bool rt_is_expired(const struct rtable *rth)
 395{
 396	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
 397}
 398
 399void rt_cache_flush(struct net *net)
 400{
 401	rt_genid_bump_ipv4(net);
 
 402}
 403
 404static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 405					   struct sk_buff *skb,
 406					   const void *daddr)
 407{
 408	const struct rtable *rt = container_of(dst, struct rtable, dst);
 409	struct net_device *dev = dst->dev;
 410	struct neighbour *n;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 411
 412	rcu_read_lock();
 413
 414	if (likely(rt->rt_gw_family == AF_INET)) {
 415		n = ip_neigh_gw4(dev, rt->rt_gw4);
 416	} else if (rt->rt_gw_family == AF_INET6) {
 417		n = ip_neigh_gw6(dev, &rt->rt_gw6);
 418        } else {
 419		__be32 pkey;
 420
 421		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
 422		n = ip_neigh_gw4(dev, pkey);
 423	}
 424
 425	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
 426		n = NULL;
 427
 428	rcu_read_unlock();
 
 
 
 
 429
 430	return n;
 
 
 
 
 
 431}
 432
 433static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 434{
 435	const struct rtable *rt = container_of(dst, struct rtable, dst);
 436	struct net_device *dev = dst->dev;
 437	const __be32 *pkey = daddr;
 
 
 
 
 438
 439	if (rt->rt_gw_family == AF_INET) {
 440		pkey = (const __be32 *)&rt->rt_gw4;
 441	} else if (rt->rt_gw_family == AF_INET6) {
 442		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
 443	} else if (!daddr ||
 444		 (rt->rt_flags &
 445		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
 446		return;
 447	}
 448	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
 449}
 450
 451/* Hash tables of size 2048..262144 depending on RAM size.
 452 * Each bucket uses 8 bytes.
 453 */
 454static u32 ip_idents_mask __read_mostly;
 455static atomic_t *ip_idents __read_mostly;
 456static u32 *ip_tstamps __read_mostly;
 457
 458/* In order to protect privacy, we add a perturbation to identifiers
 459 * if one generator is seldom used. This makes hard for an attacker
 460 * to infer how many packets were sent between two points in time.
 461 */
 462static u32 ip_idents_reserve(u32 hash, int segs)
 463{
 464	u32 bucket, old, now = (u32)jiffies;
 465	atomic_t *p_id;
 466	u32 *p_tstamp;
 467	u32 delta = 0;
 468
 469	bucket = hash & ip_idents_mask;
 470	p_tstamp = ip_tstamps + bucket;
 471	p_id = ip_idents + bucket;
 472	old = READ_ONCE(*p_tstamp);
 473
 474	if (old != now && cmpxchg(p_tstamp, old, now) == old)
 475		delta = get_random_u32_below(now - old);
 476
 477	/* If UBSAN reports an error there, please make sure your compiler
 478	 * supports -fno-strict-overflow before reporting it that was a bug
 479	 * in UBSAN, and it has been fixed in GCC-8.
 480	 */
 481	return atomic_add_return(segs + delta, p_id) - segs;
 482}
 483
 484void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 
 
 
 
 
 485{
 486	u32 hash, id;
 
 487
 488	/* Note the following code is not safe, but this is okay. */
 489	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
 490		get_random_bytes(&net->ipv4.ip_id_key,
 491				 sizeof(net->ipv4.ip_id_key));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 492
 493	hash = siphash_3u32((__force u32)iph->daddr,
 494			    (__force u32)iph->saddr,
 495			    iph->protocol,
 496			    &net->ipv4.ip_id_key);
 497	id = ip_idents_reserve(hash, segs);
 498	iph->id = htons(id);
 
 499}
 500EXPORT_SYMBOL(__ip_select_ident);
 501
 502static void ip_rt_fix_tos(struct flowi4 *fl4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 503{
 504	__u8 tos = RT_FL_TOS(fl4);
 505
 506	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
 507	if (tos & RTO_ONLINK)
 508		fl4->flowi4_scope = RT_SCOPE_LINK;
 
 
 
 509}
 510
 511static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
 512			     const struct sock *sk, const struct iphdr *iph,
 513			     int oif, __u8 tos, u8 prot, u32 mark,
 514			     int flow_flags)
 515{
 516	__u8 scope = RT_SCOPE_UNIVERSE;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 517
 518	if (sk) {
 519		oif = sk->sk_bound_dev_if;
 520		mark = READ_ONCE(sk->sk_mark);
 521		tos = ip_sock_rt_tos(sk);
 522		scope = ip_sock_rt_scope(sk);
 523		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
 524						    sk->sk_protocol;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 525	}
 
 
 526
 527	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
 528			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
 529			   sock_net_uid(net, sk));
 
 
 
 
 
 530}
 531
 532static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 533			       const struct sock *sk)
 
 
 
 
 
 534{
 535	const struct net *net = dev_net(skb->dev);
 536	const struct iphdr *iph = ip_hdr(skb);
 537	int oif = skb->dev->ifindex;
 538	u8 prot = iph->protocol;
 539	u32 mark = skb->mark;
 540	__u8 tos = iph->tos;
 541
 542	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
 
 
 543}
 544
 545static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
 
 
 
 
 546{
 547	const struct inet_sock *inet = inet_sk(sk);
 548	const struct ip_options_rcu *inet_opt;
 549	__be32 daddr = inet->inet_daddr;
 
 550
 551	rcu_read_lock();
 552	inet_opt = rcu_dereference(inet->inet_opt);
 553	if (inet_opt && inet_opt->opt.srr)
 554		daddr = inet_opt->opt.faddr;
 555	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
 556			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
 557			   ip_sock_rt_scope(sk),
 558			   inet_test_bit(HDRINCL, sk) ?
 559				IPPROTO_RAW : sk->sk_protocol,
 560			   inet_sk_flowi_flags(sk),
 561			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
 562	rcu_read_unlock();
 563}
 564
 565static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 566				 const struct sk_buff *skb)
 567{
 568	if (skb)
 569		build_skb_flow_key(fl4, skb, sk);
 570	else
 571		build_sk_flow_key(fl4, sk);
 572}
 573
 574static DEFINE_SPINLOCK(fnhe_lock);
 
 575
 576static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
 
 
 
 
 
 
 
 
 
 
 577{
 578	struct rtable *rt;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 579
 580	rt = rcu_dereference(fnhe->fnhe_rth_input);
 581	if (rt) {
 582		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
 583		dst_dev_put(&rt->dst);
 584		dst_release(&rt->dst);
 585	}
 586	rt = rcu_dereference(fnhe->fnhe_rth_output);
 587	if (rt) {
 588		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
 589		dst_dev_put(&rt->dst);
 590		dst_release(&rt->dst);
 
 
 
 
 
 
 
 
 
 
 
 
 
 591	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 592}
 593
 594static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
 
 
 
 595{
 596	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
 597	struct fib_nh_exception *fnhe, *oldest = NULL;
 598
 599	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
 600		fnhe = rcu_dereference_protected(*fnhe_p,
 601						 lockdep_is_held(&fnhe_lock));
 602		if (!fnhe)
 603			break;
 604		if (!oldest ||
 605		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
 606			oldest = fnhe;
 607			oldest_p = fnhe_p;
 608		}
 609	}
 610	fnhe_flush_routes(oldest);
 611	*oldest_p = oldest->fnhe_next;
 612	kfree_rcu(oldest, rcu);
 613}
 614
 615static u32 fnhe_hashfun(__be32 daddr)
 616{
 617	static siphash_aligned_key_t fnhe_hash_key;
 618	u64 hval;
 
 
 
 
 
 619
 620	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
 621	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
 622	return hash_64(hval, FNHE_HASH_SHIFT);
 
 
 
 
 
 
 623}
 624
 625static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
 626{
 627	rt->rt_pmtu = fnhe->fnhe_pmtu;
 628	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
 629	rt->dst.expires = fnhe->fnhe_expires;
 
 630
 631	if (fnhe->fnhe_gw) {
 632		rt->rt_flags |= RTCF_REDIRECTED;
 633		rt->rt_uses_gateway = 1;
 634		rt->rt_gw_family = AF_INET;
 635		rt->rt_gw4 = fnhe->fnhe_gw;
 636	}
 637}
 638
 639static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
 640				  __be32 gw, u32 pmtu, bool lock,
 641				  unsigned long expires)
 642{
 643	struct fnhe_hash_bucket *hash;
 644	struct fib_nh_exception *fnhe;
 645	struct rtable *rt;
 646	u32 genid, hval;
 647	unsigned int i;
 648	int depth;
 
 
 
 
 
 
 
 649
 650	genid = fnhe_genid(dev_net(nhc->nhc_dev));
 651	hval = fnhe_hashfun(daddr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 652
 653	spin_lock_bh(&fnhe_lock);
 
 
 
 
 
 
 
 
 654
 655	hash = rcu_dereference(nhc->nhc_exceptions);
 656	if (!hash) {
 657		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
 658		if (!hash)
 659			goto out_unlock;
 660		rcu_assign_pointer(nhc->nhc_exceptions, hash);
 661	}
 662
 663	hash += hval;
 664
 665	depth = 0;
 666	for (fnhe = rcu_dereference(hash->chain); fnhe;
 667	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
 668		if (fnhe->fnhe_daddr == daddr)
 669			break;
 670		depth++;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 671	}
 672
 673	if (fnhe) {
 674		if (fnhe->fnhe_genid != genid)
 675			fnhe->fnhe_genid = genid;
 676		if (gw)
 677			fnhe->fnhe_gw = gw;
 678		if (pmtu) {
 679			fnhe->fnhe_pmtu = pmtu;
 680			fnhe->fnhe_mtu_locked = lock;
 681		}
 682		fnhe->fnhe_expires = max(1UL, expires);
 683		/* Update all cached dsts too */
 684		rt = rcu_dereference(fnhe->fnhe_rth_input);
 685		if (rt)
 686			fill_route_from_fnhe(rt, fnhe);
 687		rt = rcu_dereference(fnhe->fnhe_rth_output);
 688		if (rt)
 689			fill_route_from_fnhe(rt, fnhe);
 690	} else {
 691		/* Randomize max depth to avoid some side channels attacks. */
 692		int max_depth = FNHE_RECLAIM_DEPTH +
 693				get_random_u32_below(FNHE_RECLAIM_DEPTH);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 694
 695		while (depth > max_depth) {
 696			fnhe_remove_oldest(hash);
 697			depth--;
 698		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 699
 700		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
 701		if (!fnhe)
 702			goto out_unlock;
 
 
 
 
 
 
 
 
 
 703
 704		fnhe->fnhe_next = hash->chain;
 
 
 
 
 
 705
 706		fnhe->fnhe_genid = genid;
 707		fnhe->fnhe_daddr = daddr;
 708		fnhe->fnhe_gw = gw;
 709		fnhe->fnhe_pmtu = pmtu;
 710		fnhe->fnhe_mtu_locked = lock;
 711		fnhe->fnhe_expires = max(1UL, expires);
 712
 713		rcu_assign_pointer(hash->chain, fnhe);
 
 
 714
 715		/* Exception created; mark the cached routes for the nexthop
 716		 * stale, so anyone caching it rechecks if this exception
 717		 * applies to them.
 718		 */
 719		rt = rcu_dereference(nhc->nhc_rth_input);
 720		if (rt)
 721			rt->dst.obsolete = DST_OBSOLETE_KILL;
 
 
 
 722
 723		for_each_possible_cpu(i) {
 724			struct rtable __rcu **prt;
 
 
 
 
 
 
 725
 726			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
 727			rt = rcu_dereference(*prt);
 728			if (rt)
 729				rt->dst.obsolete = DST_OBSOLETE_KILL;
 
 
 
 
 
 730		}
 
 731	}
 732
 733	fnhe->fnhe_stamp = jiffies;
 734
 735out_unlock:
 736	spin_unlock_bh(&fnhe_lock);
 737}
 738
 739static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
 740			     bool kill_route)
 741{
 742	__be32 new_gw = icmp_hdr(skb)->un.gateway;
 743	__be32 old_gw = ip_hdr(skb)->saddr;
 744	struct net_device *dev = skb->dev;
 745	struct in_device *in_dev;
 746	struct fib_result res;
 747	struct neighbour *n;
 748	struct net *net;
 749
 750	switch (icmp_hdr(skb)->code & 7) {
 751	case ICMP_REDIR_NET:
 752	case ICMP_REDIR_NETTOS:
 753	case ICMP_REDIR_HOST:
 754	case ICMP_REDIR_HOSTTOS:
 755		break;
 756
 757	default:
 
 
 758		return;
 759	}
 
 
 
 
 
 
 
 
 
 
 760
 761	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
 762		return;
 
 
 
 
 
 
 
 
 763
 764	in_dev = __in_dev_get_rcu(dev);
 765	if (!in_dev)
 766		return;
 767
 768	net = dev_net(dev);
 769	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
 770	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
 771	    ipv4_is_zeronet(new_gw))
 772		goto reject_redirect;
 773
 774	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
 775		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
 776			goto reject_redirect;
 777		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
 778			goto reject_redirect;
 779	} else {
 780		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
 781			goto reject_redirect;
 782	}
 783
 784	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
 785	if (!n)
 786		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 787	if (!IS_ERR(n)) {
 788		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
 789			neigh_event_send(n, NULL);
 790		} else {
 791			if (fib_lookup(net, fl4, &res, 0) == 0) {
 792				struct fib_nh_common *nhc;
 
 
 
 793
 794				fib_select_path(net, &res, fl4, skb);
 795				nhc = FIB_RES_NHC(res);
 796				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
 797						0, false,
 798						jiffies + ip_rt_gc_timeout);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 799			}
 800			if (kill_route)
 801				rt->dst.obsolete = DST_OBSOLETE_KILL;
 802			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 803		}
 804		neigh_release(n);
 805	}
 806	return;
 807
 808reject_redirect:
 809#ifdef CONFIG_IP_ROUTE_VERBOSE
 810	if (IN_DEV_LOG_MARTIANS(in_dev)) {
 811		const struct iphdr *iph = (const struct iphdr *) skb->data;
 812		__be32 daddr = iph->daddr;
 813		__be32 saddr = iph->saddr;
 814
 815		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
 816				     "  Advised path = %pI4 -> %pI4\n",
 817				     &old_gw, dev->name, &new_gw,
 818				     &saddr, &daddr);
 819	}
 820#endif
 821	;
 822}
 823
 824static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 825{
 826	struct rtable *rt;
 827	struct flowi4 fl4;
 828	const struct iphdr *iph = (const struct iphdr *) skb->data;
 829	struct net *net = dev_net(skb->dev);
 830	int oif = skb->dev->ifindex;
 831	u8 prot = iph->protocol;
 832	u32 mark = skb->mark;
 833	__u8 tos = iph->tos;
 834
 835	rt = dst_rtable(dst);
 
 
 836
 837	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
 838	__ip_do_redirect(rt, skb, &fl4, true);
 839}
 840
 841static void ipv4_negative_advice(struct sock *sk,
 842				 struct dst_entry *dst)
 843{
 844	struct rtable *rt = dst_rtable(dst);
 
 845
 846	if ((dst->obsolete > 0) ||
 847	    (rt->rt_flags & RTCF_REDIRECTED) ||
 848	    rt->dst.expires)
 849		sk_dst_reset(sk);
 
 
 
 
 
 
 
 
 
 
 
 850}
 851
 852/*
 853 * Algorithm:
 854 *	1. The first ip_rt_redirect_number redirects are sent
 855 *	   with exponential backoff, then we stop sending them at all,
 856 *	   assuming that the host ignores our redirects.
 857 *	2. If we did not see packets requiring redirects
 858 *	   during ip_rt_redirect_silence, we assume that the host
 859 *	   forgot redirected route and start to send redirects again.
 860 *
 861 * This algorithm is much cheaper and more intelligent than dumb load limiting
 862 * in icmp.c.
 863 *
 864 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 865 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 866 */
 867
 868void ip_rt_send_redirect(struct sk_buff *skb)
 869{
 870	struct rtable *rt = skb_rtable(skb);
 871	struct in_device *in_dev;
 872	struct inet_peer *peer;
 873	struct net *net;
 874	int log_martians;
 875	int vif;
 876
 877	rcu_read_lock();
 878	in_dev = __in_dev_get_rcu(rt->dst.dev);
 879	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 880		rcu_read_unlock();
 881		return;
 882	}
 883	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 884	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
 885	rcu_read_unlock();
 886
 887	net = dev_net(rt->dst.dev);
 888	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
 
 889	if (!peer) {
 890		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
 891			  rt_nexthop(rt, ip_hdr(skb)->daddr));
 892		return;
 893	}
 894
 895	/* No redirected packets during ip_rt_redirect_silence;
 896	 * reset the algorithm.
 897	 */
 898	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 899		peer->rate_tokens = 0;
 900		peer->n_redirects = 0;
 901	}
 902
 903	/* Too many ignored redirects; do not send anything
 904	 * set dst.rate_last to the last seen redirected packet.
 905	 */
 906	if (peer->n_redirects >= ip_rt_redirect_number) {
 907		peer->rate_last = jiffies;
 908		goto out_put_peer;
 909	}
 910
 911	/* Check for load limit; set rate_last to the latest sent
 912	 * redirect.
 913	 */
 914	if (peer->n_redirects == 0 ||
 915	    time_after(jiffies,
 916		       (peer->rate_last +
 917			(ip_rt_redirect_load << peer->n_redirects)))) {
 918		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 919
 920		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 921		peer->rate_last = jiffies;
 922		++peer->n_redirects;
 923		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
 924		    peer->n_redirects == ip_rt_redirect_number)
 
 925			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 926					     &ip_hdr(skb)->saddr, inet_iif(skb),
 927					     &ip_hdr(skb)->daddr, &gw);
 
 928	}
 929out_put_peer:
 930	inet_putpeer(peer);
 931}
 932
 933static int ip_error(struct sk_buff *skb)
 934{
 935	struct rtable *rt = skb_rtable(skb);
 936	struct net_device *dev = skb->dev;
 937	struct in_device *in_dev;
 938	struct inet_peer *peer;
 939	unsigned long now;
 940	struct net *net;
 941	SKB_DR(reason);
 942	bool send;
 943	int code;
 944
 945	if (netif_is_l3_master(skb->dev)) {
 946		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
 947		if (!dev)
 948			goto out;
 949	}
 950
 951	in_dev = __in_dev_get_rcu(dev);
 952
 953	/* IP on this device is disabled. */
 954	if (!in_dev)
 955		goto out;
 956
 957	net = dev_net(rt->dst.dev);
 958	if (!IN_DEV_FORWARD(in_dev)) {
 959		switch (rt->dst.error) {
 960		case EHOSTUNREACH:
 961			SKB_DR_SET(reason, IP_INADDRERRORS);
 962			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 963			break;
 964
 965		case ENETUNREACH:
 966			SKB_DR_SET(reason, IP_INNOROUTES);
 967			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 968			break;
 969		}
 970		goto out;
 971	}
 972
 973	switch (rt->dst.error) {
 974	case EINVAL:
 975	default:
 976		goto out;
 977	case EHOSTUNREACH:
 978		code = ICMP_HOST_UNREACH;
 979		break;
 980	case ENETUNREACH:
 981		code = ICMP_NET_UNREACH;
 982		SKB_DR_SET(reason, IP_INNOROUTES);
 983		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 984		break;
 985	case EACCES:
 986		code = ICMP_PKT_FILTERED;
 987		break;
 988	}
 989
 990	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
 991			       l3mdev_master_ifindex(skb->dev), 1);
 
 992
 993	send = true;
 994	if (peer) {
 995		now = jiffies;
 996		peer->rate_tokens += now - peer->rate_last;
 997		if (peer->rate_tokens > ip_rt_error_burst)
 998			peer->rate_tokens = ip_rt_error_burst;
 999		peer->rate_last = now;
1000		if (peer->rate_tokens >= ip_rt_error_cost)
1001			peer->rate_tokens -= ip_rt_error_cost;
1002		else
1003			send = false;
1004		inet_putpeer(peer);
1005	}
1006	if (send)
1007		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1008
1009out:	kfree_skb_reason(skb, reason);
1010	return 0;
1011}
1012
1013static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1014{
1015	struct dst_entry *dst = &rt->dst;
1016	struct net *net = dev_net(dst->dev);
1017	struct fib_result res;
1018	bool lock = false;
1019	u32 old_mtu;
1020
1021	if (ip_mtu_locked(dst))
1022		return;
1023
1024	old_mtu = ipv4_mtu(dst);
1025	if (old_mtu < mtu)
1026		return;
1027
1028	if (mtu < net->ipv4.ip_rt_min_pmtu) {
1029		lock = true;
1030		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
1031	}
1032
1033	if (rt->rt_pmtu == mtu && !lock &&
1034	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
1035		return;
1036
1037	rcu_read_lock();
1038	if (fib_lookup(net, fl4, &res, 0) == 0) {
1039		struct fib_nh_common *nhc;
1040
1041		fib_select_path(net, &res, fl4, NULL);
1042		nhc = FIB_RES_NHC(res);
1043		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1044				      jiffies + net->ipv4.ip_rt_mtu_expires);
1045	}
1046	rcu_read_unlock();
1047}
1048
1049static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1050			      struct sk_buff *skb, u32 mtu,
1051			      bool confirm_neigh)
1052{
1053	struct rtable *rt = dst_rtable(dst);
1054	struct flowi4 fl4;
 
1055
1056	ip_rt_build_flow_key(&fl4, sk, skb);
 
 
1057
1058	/* Don't make lookup fail for bridged encapsulations */
1059	if (skb && netif_is_any_bridge_port(skb->dev))
1060		fl4.flowi4_oif = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1061
1062	__ip_rt_update_pmtu(rt, &fl4, mtu);
1063}
1064
1065void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1066		      int oif, u8 protocol)
1067{
1068	const struct iphdr *iph = (const struct iphdr *)skb->data;
1069	struct flowi4 fl4;
1070	struct rtable *rt;
1071	u32 mark = IP4_REPLY_MARK(net, skb->mark);
1072
1073	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
1074			 0);
1075	rt = __ip_route_output_key(net, &fl4);
1076	if (!IS_ERR(rt)) {
1077		__ip_rt_update_pmtu(rt, &fl4, mtu);
1078		ip_rt_put(rt);
1079	}
 
1080}
1081EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1082
1083static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1084{
1085	const struct iphdr *iph = (const struct iphdr *)skb->data;
1086	struct flowi4 fl4;
1087	struct rtable *rt;
1088
1089	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1090
1091	if (!fl4.flowi4_mark)
1092		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1093
1094	rt = __ip_route_output_key(sock_net(sk), &fl4);
1095	if (!IS_ERR(rt)) {
1096		__ip_rt_update_pmtu(rt, &fl4, mtu);
1097		ip_rt_put(rt);
1098	}
 
1099}
1100
1101void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1102{
1103	const struct iphdr *iph = (const struct iphdr *)skb->data;
1104	struct flowi4 fl4;
1105	struct rtable *rt;
1106	struct dst_entry *odst = NULL;
1107	bool new = false;
1108	struct net *net = sock_net(sk);
1109
1110	bh_lock_sock(sk);
1111
1112	if (!ip_sk_accept_pmtu(sk))
1113		goto out;
 
 
 
1114
1115	odst = sk_dst_get(sk);
 
 
1116
1117	if (sock_owned_by_user(sk) || !odst) {
1118		__ipv4_sk_update_pmtu(skb, sk, mtu);
1119		goto out;
1120	}
1121
1122	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
 
1123
1124	rt = dst_rtable(odst);
1125	if (odst->obsolete && !odst->ops->check(odst, 0)) {
1126		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1127		if (IS_ERR(rt))
1128			goto out;
1129
1130		new = true;
1131	}
 
1132
1133	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
1134
1135	if (!dst_check(&rt->dst, 0)) {
1136		if (new)
1137			dst_release(&rt->dst);
 
1138
1139		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1140		if (IS_ERR(rt))
1141			goto out;
1142
1143		new = true;
1144	}
 
1145
1146	if (new)
1147		sk_dst_set(sk, &rt->dst);
1148
1149out:
1150	bh_unlock_sock(sk);
1151	dst_release(odst);
1152}
1153EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1154
1155void ipv4_redirect(struct sk_buff *skb, struct net *net,
1156		   int oif, u8 protocol)
1157{
1158	const struct iphdr *iph = (const struct iphdr *)skb->data;
1159	struct flowi4 fl4;
1160	struct rtable *rt;
1161
1162	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
1163	rt = __ip_route_output_key(net, &fl4);
1164	if (!IS_ERR(rt)) {
1165		__ip_do_redirect(rt, skb, &fl4, false);
1166		ip_rt_put(rt);
1167	}
1168}
1169EXPORT_SYMBOL_GPL(ipv4_redirect);
1170
1171void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1172{
1173	const struct iphdr *iph = (const struct iphdr *)skb->data;
1174	struct flowi4 fl4;
1175	struct rtable *rt;
1176	struct net *net = sock_net(sk);
1177
1178	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1179	rt = __ip_route_output_key(net, &fl4);
1180	if (!IS_ERR(rt)) {
1181		__ip_do_redirect(rt, skb, &fl4, false);
1182		ip_rt_put(rt);
1183	}
1184}
1185EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1186
1187INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
1188							 u32 cookie)
1189{
1190	struct rtable *rt = dst_rtable(dst);
1191
1192	/* All IPV4 dsts are created with ->obsolete set to the value
1193	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1194	 * into this function always.
1195	 *
1196	 * When a PMTU/redirect information update invalidates a route,
1197	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1198	 * DST_OBSOLETE_DEAD.
1199	 */
1200	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1201		return NULL;
 
1202	return dst;
1203}
1204EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
1205
1206static void ipv4_send_dest_unreach(struct sk_buff *skb)
1207{
1208	struct net_device *dev;
1209	struct ip_options opt;
1210	int res;
1211
1212	/* Recompile ip options since IPCB may not be valid anymore.
1213	 * Also check we have a reasonable ipv4 header.
1214	 */
1215	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1216	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1217		return;
1218
1219	memset(&opt, 0, sizeof(opt));
1220	if (ip_hdr(skb)->ihl > 5) {
1221		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1222			return;
1223		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1224
1225		rcu_read_lock();
1226		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
1227		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
1228		rcu_read_unlock();
1229
1230		if (res)
1231			return;
1232	}
1233	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1234}
1235
 
1236static void ipv4_link_failure(struct sk_buff *skb)
1237{
1238	struct rtable *rt;
1239
1240	ipv4_send_dest_unreach(skb);
1241
1242	rt = skb_rtable(skb);
1243	if (rt)
1244		dst_set_expires(&rt->dst, 0);
1245}
1246
1247static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1248{
1249	pr_debug("%s: %pI4 -> %pI4, %s\n",
1250		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1251		 skb->dev ? skb->dev->name : "?");
1252	kfree_skb(skb);
1253	WARN_ON(1);
1254	return 0;
1255}
1256
1257/*
1258 * We do not cache source address of outgoing interface,
1259 * because it is used only by IP RR, TS and SRR options,
1260 * so that it out of fast path.
1261 *
1262 * BTW remember: "addr" is allowed to be not aligned
1263 * in IP options!
1264 */
1265
1266void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1267{
1268	__be32 src;
1269
1270	if (rt_is_output_route(rt))
1271		src = ip_hdr(skb)->saddr;
1272	else {
1273		struct fib_result res;
1274		struct iphdr *iph = ip_hdr(skb);
1275		struct flowi4 fl4 = {
1276			.daddr = iph->daddr,
1277			.saddr = iph->saddr,
1278			.flowi4_tos = RT_TOS(iph->tos),
1279			.flowi4_oif = rt->dst.dev->ifindex,
1280			.flowi4_iif = skb->dev->ifindex,
1281			.flowi4_mark = skb->mark,
1282		};
 
 
 
1283
1284		rcu_read_lock();
1285		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1286			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1287		else
1288			src = inet_select_addr(rt->dst.dev,
1289					       rt_nexthop(rt, iph->daddr),
1290					       RT_SCOPE_UNIVERSE);
1291		rcu_read_unlock();
1292	}
1293	memcpy(addr, &src, 4);
1294}
1295
1296#ifdef CONFIG_IP_ROUTE_CLASSID
1297static void set_class_tag(struct rtable *rt, u32 tag)
1298{
1299	if (!(rt->dst.tclassid & 0xFFFF))
1300		rt->dst.tclassid |= tag & 0xFFFF;
1301	if (!(rt->dst.tclassid & 0xFFFF0000))
1302		rt->dst.tclassid |= tag & 0xFFFF0000;
1303}
1304#endif
1305
1306static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1307{
1308	struct net *net = dev_net(dst->dev);
1309	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1310	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1311				    net->ipv4.ip_rt_min_advmss);
1312
1313	return min(advmss, IPV4_MAX_PMTU - header_size);
1314}
1315
1316INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
1317{
1318	return ip_dst_mtu_maybe_forward(dst, false);
1319}
1320EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
1321
1322static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1323{
1324	struct fnhe_hash_bucket *hash;
1325	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1326	u32 hval = fnhe_hashfun(daddr);
1327
1328	spin_lock_bh(&fnhe_lock);
1329
1330	hash = rcu_dereference_protected(nhc->nhc_exceptions,
1331					 lockdep_is_held(&fnhe_lock));
1332	hash += hval;
1333
1334	fnhe_p = &hash->chain;
1335	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1336	while (fnhe) {
1337		if (fnhe->fnhe_daddr == daddr) {
1338			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1339				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1340			/* set fnhe_daddr to 0 to ensure it won't bind with
1341			 * new dsts in rt_bind_exception().
1342			 */
1343			fnhe->fnhe_daddr = 0;
1344			fnhe_flush_routes(fnhe);
1345			kfree_rcu(fnhe, rcu);
1346			break;
1347		}
1348		fnhe_p = &fnhe->fnhe_next;
1349		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1350						 lockdep_is_held(&fnhe_lock));
1351	}
1352
1353	spin_unlock_bh(&fnhe_lock);
1354}
1355
1356static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1357					       __be32 daddr)
1358{
1359	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1360	struct fib_nh_exception *fnhe;
1361	u32 hval;
1362
1363	if (!hash)
1364		return NULL;
1365
1366	hval = fnhe_hashfun(daddr);
1367
1368	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1369	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1370		if (fnhe->fnhe_daddr == daddr) {
1371			if (fnhe->fnhe_expires &&
1372			    time_after(jiffies, fnhe->fnhe_expires)) {
1373				ip_del_fnhe(nhc, daddr);
1374				break;
1375			}
1376			return fnhe;
1377		}
1378	}
1379	return NULL;
1380}
1381
1382/* MTU selection:
1383 * 1. mtu on route is locked - use it
1384 * 2. mtu from nexthop exception
1385 * 3. mtu from egress device
1386 */
1387
1388u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1389{
1390	struct fib_nh_common *nhc = res->nhc;
1391	struct net_device *dev = nhc->nhc_dev;
1392	struct fib_info *fi = res->fi;
1393	u32 mtu = 0;
1394
1395	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
1396	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1397		mtu = fi->fib_mtu;
1398
1399	if (likely(!mtu)) {
1400		struct fib_nh_exception *fnhe;
1401
1402		fnhe = find_exception(nhc, daddr);
1403		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1404			mtu = fnhe->fnhe_pmtu;
1405	}
1406
1407	if (likely(!mtu))
1408		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1409
1410	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1411}
1412
1413static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1414			      __be32 daddr, const bool do_cache)
1415{
1416	bool ret = false;
 
1417
1418	spin_lock_bh(&fnhe_lock);
 
 
 
 
1419
1420	if (daddr == fnhe->fnhe_daddr) {
1421		struct rtable __rcu **porig;
1422		struct rtable *orig;
1423		int genid = fnhe_genid(dev_net(rt->dst.dev));
1424
1425		if (rt_is_input_route(rt))
1426			porig = &fnhe->fnhe_rth_input;
1427		else
1428			porig = &fnhe->fnhe_rth_output;
1429		orig = rcu_dereference(*porig);
1430
1431		if (fnhe->fnhe_genid != genid) {
1432			fnhe->fnhe_genid = genid;
1433			fnhe->fnhe_gw = 0;
1434			fnhe->fnhe_pmtu = 0;
1435			fnhe->fnhe_expires = 0;
1436			fnhe->fnhe_mtu_locked = false;
1437			fnhe_flush_routes(fnhe);
1438			orig = NULL;
1439		}
1440		fill_route_from_fnhe(rt, fnhe);
1441		if (!rt->rt_gw4) {
1442			rt->rt_gw4 = daddr;
1443			rt->rt_gw_family = AF_INET;
1444		}
1445
1446		if (do_cache) {
1447			dst_hold(&rt->dst);
1448			rcu_assign_pointer(*porig, rt);
1449			if (orig) {
1450				dst_dev_put(&orig->dst);
1451				dst_release(&orig->dst);
1452			}
1453			ret = true;
1454		}
1455
1456		fnhe->fnhe_stamp = jiffies;
1457	}
1458	spin_unlock_bh(&fnhe_lock);
1459
1460	return ret;
1461}
1462
1463static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1464{
1465	struct rtable *orig, *prev, **p;
1466	bool ret = true;
1467
1468	if (rt_is_input_route(rt)) {
1469		p = (struct rtable **)&nhc->nhc_rth_input;
1470	} else {
1471		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1472	}
1473	orig = *p;
1474
1475	/* hold dst before doing cmpxchg() to avoid race condition
1476	 * on this dst
1477	 */
1478	dst_hold(&rt->dst);
1479	prev = cmpxchg(p, orig, rt);
1480	if (prev == orig) {
1481		if (orig) {
1482			rt_add_uncached_list(orig);
1483			dst_release(&orig->dst);
1484		}
1485	} else {
1486		dst_release(&rt->dst);
1487		ret = false;
1488	}
1489
1490	return ret;
1491}
1492
1493struct uncached_list {
1494	spinlock_t		lock;
1495	struct list_head	head;
1496	struct list_head	quarantine;
1497};
1498
1499static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1500
1501void rt_add_uncached_list(struct rtable *rt)
1502{
1503	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1504
1505	rt->dst.rt_uncached_list = ul;
1506
1507	spin_lock_bh(&ul->lock);
1508	list_add_tail(&rt->dst.rt_uncached, &ul->head);
1509	spin_unlock_bh(&ul->lock);
1510}
1511
1512void rt_del_uncached_list(struct rtable *rt)
1513{
1514	if (!list_empty(&rt->dst.rt_uncached)) {
1515		struct uncached_list *ul = rt->dst.rt_uncached_list;
1516
1517		spin_lock_bh(&ul->lock);
1518		list_del_init(&rt->dst.rt_uncached);
1519		spin_unlock_bh(&ul->lock);
1520	}
1521}
1522
1523static void ipv4_dst_destroy(struct dst_entry *dst)
1524{
1525	ip_dst_metrics_put(dst);
1526	rt_del_uncached_list(dst_rtable(dst));
1527}
1528
1529void rt_flush_dev(struct net_device *dev)
1530{
1531	struct rtable *rt, *safe;
1532	int cpu;
1533
1534	for_each_possible_cpu(cpu) {
1535		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1536
1537		if (list_empty(&ul->head))
1538			continue;
1539
1540		spin_lock_bh(&ul->lock);
1541		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
1542			if (rt->dst.dev != dev)
1543				continue;
1544			rt->dst.dev = blackhole_netdev;
1545			netdev_ref_replace(dev, blackhole_netdev,
1546					   &rt->dst.dev_tracker, GFP_ATOMIC);
1547			list_move(&rt->dst.rt_uncached, &ul->quarantine);
1548		}
1549		spin_unlock_bh(&ul->lock);
1550	}
1551}
1552
1553static bool rt_cache_valid(const struct rtable *rt)
1554{
1555	return	rt &&
1556		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1557		!rt_is_expired(rt);
1558}
1559
1560static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1561			   const struct fib_result *res,
1562			   struct fib_nh_exception *fnhe,
1563			   struct fib_info *fi, u16 type, u32 itag,
1564			   const bool do_cache)
1565{
1566	bool cached = false;
1567
1568	if (fi) {
1569		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1570
1571		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1572			rt->rt_uses_gateway = 1;
1573			rt->rt_gw_family = nhc->nhc_gw_family;
1574			/* only INET and INET6 are supported */
1575			if (likely(nhc->nhc_gw_family == AF_INET))
1576				rt->rt_gw4 = nhc->nhc_gw.ipv4;
1577			else
1578				rt->rt_gw6 = nhc->nhc_gw.ipv6;
1579		}
1580
1581		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1582
1583#ifdef CONFIG_IP_ROUTE_CLASSID
1584		if (nhc->nhc_family == AF_INET) {
1585			struct fib_nh *nh;
 
1586
1587			nh = container_of(nhc, struct fib_nh, nh_common);
1588			rt->dst.tclassid = nh->nh_tclassid;
1589		}
1590#endif
1591		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1592		if (unlikely(fnhe))
1593			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1594		else if (do_cache)
1595			cached = rt_cache_route(nhc, rt);
1596		if (unlikely(!cached)) {
1597			/* Routes we intend to cache in nexthop exception or
1598			 * FIB nexthop have the DST_NOCACHE bit clear.
1599			 * However, if we are unsuccessful at storing this
1600			 * route into the cache we really need to set it.
1601			 */
1602			if (!rt->rt_gw4) {
1603				rt->rt_gw_family = AF_INET;
1604				rt->rt_gw4 = daddr;
1605			}
1606			rt_add_uncached_list(rt);
1607		}
1608	} else
1609		rt_add_uncached_list(rt);
1610
1611#ifdef CONFIG_IP_ROUTE_CLASSID
1612#ifdef CONFIG_IP_MULTIPLE_TABLES
1613	set_class_tag(rt, res->tclassid);
1614#endif
1615	set_class_tag(rt, itag);
1616#endif
1617}
1618
1619struct rtable *rt_dst_alloc(struct net_device *dev,
1620			    unsigned int flags, u16 type,
1621			    bool noxfrm)
1622{
1623	struct rtable *rt;
1624
1625	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
1626		       (noxfrm ? DST_NOXFRM : 0));
1627
1628	if (rt) {
1629		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1630		rt->rt_flags = flags;
1631		rt->rt_type = type;
1632		rt->rt_is_input = 0;
1633		rt->rt_iif = 0;
1634		rt->rt_pmtu = 0;
1635		rt->rt_mtu_locked = 0;
1636		rt->rt_uses_gateway = 0;
1637		rt->rt_gw_family = 0;
1638		rt->rt_gw4 = 0;
1639
1640		rt->dst.output = ip_output;
1641		if (flags & RTCF_LOCAL)
1642			rt->dst.input = ip_local_deliver;
1643	}
1644
1645	return rt;
1646}
1647EXPORT_SYMBOL(rt_dst_alloc);
1648
1649struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1650{
1651	struct rtable *new_rt;
1652
1653	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
1654			   rt->dst.flags);
1655
1656	if (new_rt) {
1657		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1658		new_rt->rt_flags = rt->rt_flags;
1659		new_rt->rt_type = rt->rt_type;
1660		new_rt->rt_is_input = rt->rt_is_input;
1661		new_rt->rt_iif = rt->rt_iif;
1662		new_rt->rt_pmtu = rt->rt_pmtu;
1663		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1664		new_rt->rt_gw_family = rt->rt_gw_family;
1665		if (rt->rt_gw_family == AF_INET)
1666			new_rt->rt_gw4 = rt->rt_gw4;
1667		else if (rt->rt_gw_family == AF_INET6)
1668			new_rt->rt_gw6 = rt->rt_gw6;
1669
1670		new_rt->dst.input = rt->dst.input;
1671		new_rt->dst.output = rt->dst.output;
1672		new_rt->dst.error = rt->dst.error;
1673		new_rt->dst.lastuse = jiffies;
1674		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1675	}
1676	return new_rt;
1677}
1678EXPORT_SYMBOL(rt_dst_clone);
1679
1680/* called in rcu_read_lock() section */
1681int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1682			  u8 tos, struct net_device *dev,
1683			  struct in_device *in_dev, u32 *itag)
1684{
 
 
 
 
 
1685	int err;
1686
1687	/* Primary sanity checks. */
1688	if (!in_dev)
 
1689		return -EINVAL;
1690
1691	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1692	    skb->protocol != htons(ETH_P_IP))
1693		return -EINVAL;
1694
1695	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1696		return -EINVAL;
1697
1698	if (ipv4_is_zeronet(saddr)) {
1699		if (!ipv4_is_local_multicast(daddr) &&
1700		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
1701			return -EINVAL;
1702	} else {
1703		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1704					  in_dev, itag);
1705		if (err < 0)
1706			return err;
1707	}
1708	return 0;
1709}
1710
1711/* called in rcu_read_lock() section */
1712static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1713			     u8 tos, struct net_device *dev, int our)
1714{
1715	struct in_device *in_dev = __in_dev_get_rcu(dev);
1716	unsigned int flags = RTCF_MULTICAST;
1717	struct rtable *rth;
1718	u32 itag = 0;
1719	int err;
1720
1721	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1722	if (err)
1723		return err;
1724
1725	if (our)
1726		flags |= RTCF_LOCAL;
1727
1728	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
1729		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1730
1731	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1732			   false);
1733	if (!rth)
1734		return -ENOBUFS;
1735
1736#ifdef CONFIG_IP_ROUTE_CLASSID
1737	rth->dst.tclassid = itag;
1738#endif
1739	rth->dst.output = ip_rt_bug;
1740	rth->rt_is_input= 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1741
1742#ifdef CONFIG_IP_MROUTE
1743	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1744		rth->dst.input = ip_mr_input;
1745#endif
1746	RT_CACHE_STAT_INC(in_slow_mc);
1747
1748	skb_dst_drop(skb);
1749	skb_dst_set(skb, &rth->dst);
1750	return 0;
 
 
 
 
 
 
 
1751}
1752
1753
1754static void ip_handle_martian_source(struct net_device *dev,
1755				     struct in_device *in_dev,
1756				     struct sk_buff *skb,
1757				     __be32 daddr,
1758				     __be32 saddr)
1759{
1760	RT_CACHE_STAT_INC(in_martian_src);
1761#ifdef CONFIG_IP_ROUTE_VERBOSE
1762	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1763		/*
1764		 *	RFC1812 recommendation, if source is martian,
1765		 *	the only hint is MAC header.
1766		 */
1767		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1768			&daddr, &saddr, dev->name);
1769		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1770			print_hex_dump(KERN_WARNING, "ll header: ",
1771				       DUMP_PREFIX_OFFSET, 16, 1,
1772				       skb_mac_header(skb),
1773				       dev->hard_header_len, false);
1774		}
1775	}
1776#endif
1777}
1778
1779/* called in rcu_read_lock() section */
1780static int __mkroute_input(struct sk_buff *skb,
1781			   const struct fib_result *res,
1782			   struct in_device *in_dev,
1783			   __be32 daddr, __be32 saddr, u32 tos)
 
1784{
1785	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1786	struct net_device *dev = nhc->nhc_dev;
1787	struct fib_nh_exception *fnhe;
1788	struct rtable *rth;
1789	int err;
1790	struct in_device *out_dev;
1791	bool do_cache;
1792	u32 itag = 0;
 
1793
1794	/* get a working reference to the output device */
1795	out_dev = __in_dev_get_rcu(dev);
1796	if (!out_dev) {
1797		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1798		return -EINVAL;
1799	}
1800
 
1801	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1802				  in_dev->dev, in_dev, &itag);
1803	if (err < 0) {
1804		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1805					 saddr);
1806
1807		goto cleanup;
1808	}
1809
1810	do_cache = res->fi && !itag;
1811	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1812	    skb->protocol == htons(ETH_P_IP)) {
1813		__be32 gw;
1814
1815		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1816		if (IN_DEV_SHARED_MEDIA(out_dev) ||
1817		    inet_addr_onlink(out_dev, saddr, gw))
1818			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1819	}
1820
1821	if (skb->protocol != htons(ETH_P_IP)) {
1822		/* Not IP (i.e. ARP). Do not create route, if it is
1823		 * invalid for proxy arp. DNAT routes are always valid.
1824		 *
1825		 * Proxy arp feature have been extended to allow, ARP
1826		 * replies back to the same interface, to support
1827		 * Private VLAN switch technologies. See arp.c.
1828		 */
1829		if (out_dev == in_dev &&
1830		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1831			err = -EINVAL;
1832			goto cleanup;
1833		}
1834	}
1835
1836	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
1837		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1838
1839	fnhe = find_exception(nhc, daddr);
1840	if (do_cache) {
1841		if (fnhe)
1842			rth = rcu_dereference(fnhe->fnhe_rth_input);
1843		else
1844			rth = rcu_dereference(nhc->nhc_rth_input);
1845		if (rt_cache_valid(rth)) {
1846			skb_dst_set_noref(skb, &rth->dst);
1847			goto out;
1848		}
1849	}
1850
1851	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1852			   IN_DEV_ORCONF(out_dev, NOXFRM));
1853	if (!rth) {
1854		err = -ENOBUFS;
1855		goto cleanup;
1856	}
1857
1858	rth->rt_is_input = 1;
1859	RT_CACHE_STAT_INC(in_slow_tot);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1860
1861	rth->dst.input = ip_forward;
 
1862
1863	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1864		       do_cache);
1865	lwtunnel_set_redirect(&rth->dst);
1866	skb_dst_set(skb, &rth->dst);
1867out:
1868	err = 0;
1869 cleanup:
1870	return err;
1871}
1872
1873#ifdef CONFIG_IP_ROUTE_MULTIPATH
1874/* To make ICMP packets follow the right flow, the multipath hash is
1875 * calculated from the inner IP addresses.
1876 */
1877static void ip_multipath_l3_keys(const struct sk_buff *skb,
1878				 struct flow_keys *hash_keys)
1879{
1880	const struct iphdr *outer_iph = ip_hdr(skb);
1881	const struct iphdr *key_iph = outer_iph;
1882	const struct iphdr *inner_iph;
1883	const struct icmphdr *icmph;
1884	struct iphdr _inner_iph;
1885	struct icmphdr _icmph;
1886
1887	if (likely(outer_iph->protocol != IPPROTO_ICMP))
1888		goto out;
1889
1890	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1891		goto out;
1892
1893	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1894				   &_icmph);
1895	if (!icmph)
1896		goto out;
1897
1898	if (!icmp_is_err(icmph->type))
1899		goto out;
1900
1901	inner_iph = skb_header_pointer(skb,
1902				       outer_iph->ihl * 4 + sizeof(_icmph),
1903				       sizeof(_inner_iph), &_inner_iph);
1904	if (!inner_iph)
1905		goto out;
1906
1907	key_iph = inner_iph;
1908out:
1909	hash_keys->addrs.v4addrs.src = key_iph->saddr;
1910	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1911}
1912
1913static u32 fib_multipath_custom_hash_outer(const struct net *net,
1914					   const struct sk_buff *skb,
1915					   bool *p_has_inner)
1916{
1917	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1918	struct flow_keys keys, hash_keys;
1919
1920	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1921		return 0;
1922
1923	memset(&hash_keys, 0, sizeof(hash_keys));
1924	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
1925
1926	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1927	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1928		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1929	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1930		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1931	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1932		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1933	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1934		hash_keys.ports.src = keys.ports.src;
1935	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
1936		hash_keys.ports.dst = keys.ports.dst;
1937
1938	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
1939	return flow_hash_from_keys(&hash_keys);
1940}
1941
1942static u32 fib_multipath_custom_hash_inner(const struct net *net,
1943					   const struct sk_buff *skb,
1944					   bool has_inner)
1945{
1946	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1947	struct flow_keys keys, hash_keys;
1948
1949	/* We assume the packet carries an encapsulation, but if none was
1950	 * encountered during dissection of the outer flow, then there is no
1951	 * point in calling the flow dissector again.
1952	 */
1953	if (!has_inner)
1954		return 0;
1955
1956	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
1957		return 0;
1958
1959	memset(&hash_keys, 0, sizeof(hash_keys));
1960	skb_flow_dissect_flow_keys(skb, &keys, 0);
1961
1962	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
1963		return 0;
1964
1965	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1966		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1967		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1968			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1969		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1970			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1971	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1972		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1973		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1974			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1975		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1976			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1977		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
1978			hash_keys.tags.flow_label = keys.tags.flow_label;
1979	}
1980
1981	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
1982		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1983	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
1984		hash_keys.ports.src = keys.ports.src;
1985	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
1986		hash_keys.ports.dst = keys.ports.dst;
1987
1988	return flow_hash_from_keys(&hash_keys);
1989}
1990
1991static u32 fib_multipath_custom_hash_skb(const struct net *net,
1992					 const struct sk_buff *skb)
1993{
1994	u32 mhash, mhash_inner;
1995	bool has_inner = true;
1996
1997	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
1998	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
1999
2000	return jhash_2words(mhash, mhash_inner, 0);
2001}
2002
2003static u32 fib_multipath_custom_hash_fl4(const struct net *net,
2004					 const struct flowi4 *fl4)
2005{
2006	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
2007	struct flow_keys hash_keys;
2008
2009	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2010		return 0;
2011
2012	memset(&hash_keys, 0, sizeof(hash_keys));
2013	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2014	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2015		hash_keys.addrs.v4addrs.src = fl4->saddr;
2016	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2017		hash_keys.addrs.v4addrs.dst = fl4->daddr;
2018	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2019		hash_keys.basic.ip_proto = fl4->flowi4_proto;
2020	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2021		hash_keys.ports.src = fl4->fl4_sport;
2022	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2023		hash_keys.ports.dst = fl4->fl4_dport;
2024
2025	return flow_hash_from_keys(&hash_keys);
2026}
2027
2028/* if skb is set it will be used and fl4 can be NULL */
2029int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2030		       const struct sk_buff *skb, struct flow_keys *flkeys)
2031{
2032	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2033	struct flow_keys hash_keys;
2034	u32 mhash = 0;
2035
2036	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
2037	case 0:
2038		memset(&hash_keys, 0, sizeof(hash_keys));
2039		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2040		if (skb) {
2041			ip_multipath_l3_keys(skb, &hash_keys);
2042		} else {
2043			hash_keys.addrs.v4addrs.src = fl4->saddr;
2044			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2045		}
2046		mhash = flow_hash_from_keys(&hash_keys);
2047		break;
2048	case 1:
2049		/* skb is currently provided only when forwarding */
2050		if (skb) {
2051			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2052			struct flow_keys keys;
2053
2054			/* short-circuit if we already have L4 hash present */
2055			if (skb->l4_hash)
2056				return skb_get_hash_raw(skb) >> 1;
2057
2058			memset(&hash_keys, 0, sizeof(hash_keys));
2059
2060			if (!flkeys) {
2061				skb_flow_dissect_flow_keys(skb, &keys, flag);
2062				flkeys = &keys;
2063			}
2064
2065			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2066			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2067			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2068			hash_keys.ports.src = flkeys->ports.src;
2069			hash_keys.ports.dst = flkeys->ports.dst;
2070			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2071		} else {
2072			memset(&hash_keys, 0, sizeof(hash_keys));
2073			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2074			hash_keys.addrs.v4addrs.src = fl4->saddr;
2075			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2076			hash_keys.ports.src = fl4->fl4_sport;
2077			hash_keys.ports.dst = fl4->fl4_dport;
2078			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2079		}
2080		mhash = flow_hash_from_keys(&hash_keys);
2081		break;
2082	case 2:
2083		memset(&hash_keys, 0, sizeof(hash_keys));
2084		/* skb is currently provided only when forwarding */
2085		if (skb) {
2086			struct flow_keys keys;
2087
2088			skb_flow_dissect_flow_keys(skb, &keys, 0);
2089			/* Inner can be v4 or v6 */
2090			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2091				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2092				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2093				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2094			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2095				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2096				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2097				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2098				hash_keys.tags.flow_label = keys.tags.flow_label;
2099				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2100			} else {
2101				/* Same as case 0 */
2102				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2103				ip_multipath_l3_keys(skb, &hash_keys);
2104			}
2105		} else {
2106			/* Same as case 0 */
2107			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2108			hash_keys.addrs.v4addrs.src = fl4->saddr;
2109			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2110		}
2111		mhash = flow_hash_from_keys(&hash_keys);
2112		break;
2113	case 3:
2114		if (skb)
2115			mhash = fib_multipath_custom_hash_skb(net, skb);
2116		else
2117			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2118		break;
2119	}
2120
2121	if (multipath_hash)
2122		mhash = jhash_2words(mhash, multipath_hash, 0);
2123
2124	return mhash >> 1;
2125}
2126#endif /* CONFIG_IP_ROUTE_MULTIPATH */
2127
2128static int ip_mkroute_input(struct sk_buff *skb,
2129			    struct fib_result *res,
 
2130			    struct in_device *in_dev,
2131			    __be32 daddr, __be32 saddr, u32 tos,
2132			    struct flow_keys *hkeys)
2133{
 
 
 
 
2134#ifdef CONFIG_IP_ROUTE_MULTIPATH
2135	if (res->fi && fib_info_num_path(res->fi) > 1) {
2136		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2137
2138		fib_select_multipath(res, h);
2139		IPCB(skb)->flags |= IPSKB_MULTIPATH;
2140	}
2141#endif
2142
2143	/* create a routing cache entry */
2144	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2145}
 
2146
2147/* Implements all the saddr-related checks as ip_route_input_slow(),
2148 * assuming daddr is valid and the destination is not a local broadcast one.
2149 * Uses the provided hint instead of performing a route lookup.
2150 */
2151int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2152		      u8 tos, struct net_device *dev,
2153		      const struct sk_buff *hint)
2154{
2155	struct in_device *in_dev = __in_dev_get_rcu(dev);
2156	struct rtable *rt = skb_rtable(hint);
2157	struct net *net = dev_net(dev);
2158	int err = -EINVAL;
2159	u32 tag = 0;
2160
2161	if (!in_dev)
2162		return -EINVAL;
2163
2164	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2165		goto martian_source;
2166
2167	if (ipv4_is_zeronet(saddr))
2168		goto martian_source;
2169
2170	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2171		goto martian_source;
2172
2173	if (rt->rt_type != RTN_LOCAL)
2174		goto skip_validate_source;
2175
2176	tos &= IPTOS_RT_MASK;
2177	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
2178	if (err < 0)
2179		goto martian_source;
2180
2181skip_validate_source:
2182	skb_dst_copy(skb, hint);
2183	return 0;
2184
2185martian_source:
2186	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2187	return err;
2188}
2189
2190/* get device for dst_alloc with local routes */
2191static struct net_device *ip_rt_get_dev(struct net *net,
2192					const struct fib_result *res)
2193{
2194	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2195	struct net_device *dev = NULL;
2196
2197	if (nhc)
2198		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2199
2200	return dev ? : net->loopback_dev;
2201}
2202
2203/*
2204 *	NOTE. We drop all the packets that has local source
2205 *	addresses, because every properly looped back packet
2206 *	must have correct destination already attached by output routine.
2207 *	Changes in the enforced policies must be applied also to
2208 *	ip_route_use_hint().
2209 *
2210 *	Such approach solves two big problems:
2211 *	1. Not simplex devices are handled properly.
2212 *	2. IP spoofing attempts are filtered with 100% of guarantee.
2213 *	called with rcu_read_lock()
2214 */
2215
2216static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2217			       u8 tos, struct net_device *dev,
2218			       struct fib_result *res)
2219{
 
2220	struct in_device *in_dev = __in_dev_get_rcu(dev);
2221	struct flow_keys *flkeys = NULL, _flkeys;
2222	struct net    *net = dev_net(dev);
2223	struct ip_tunnel_info *tun_info;
2224	int		err = -EINVAL;
2225	unsigned int	flags = 0;
2226	u32		itag = 0;
2227	struct rtable	*rth;
2228	struct flowi4	fl4;
2229	bool do_cache = true;
 
 
2230
2231	/* IP on this device is disabled. */
2232
2233	if (!in_dev)
2234		goto out;
2235
2236	/* Check for the most weird martians, which can be not detected
2237	 * by fib_lookup.
2238	 */
2239
2240	tun_info = skb_tunnel_info(skb);
2241	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2242		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2243	else
2244		fl4.flowi4_tun_key.tun_id = 0;
2245	skb_dst_drop(skb);
2246
2247	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2248		goto martian_source;
2249
2250	res->fi = NULL;
2251	res->table = NULL;
2252	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2253		goto brd_input;
2254
2255	/* Accept zero addresses only to limited broadcast;
2256	 * I even do not know to fix it or not. Waiting for complains :-)
2257	 */
2258	if (ipv4_is_zeronet(saddr))
2259		goto martian_source;
2260
2261	if (ipv4_is_zeronet(daddr))
2262		goto martian_destination;
2263
2264	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
2265	 * and call it once if daddr or/and saddr are loopback addresses
2266	 */
2267	if (ipv4_is_loopback(daddr)) {
2268		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2269			goto martian_destination;
2270	} else if (ipv4_is_loopback(saddr)) {
2271		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2272			goto martian_source;
2273	}
2274
2275	/*
2276	 *	Now we are ready to route packet.
2277	 */
2278	fl4.flowi4_l3mdev = 0;
2279	fl4.flowi4_oif = 0;
2280	fl4.flowi4_iif = dev->ifindex;
2281	fl4.flowi4_mark = skb->mark;
2282	fl4.flowi4_tos = tos;
2283	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2284	fl4.flowi4_flags = 0;
2285	fl4.daddr = daddr;
2286	fl4.saddr = saddr;
2287	fl4.flowi4_uid = sock_net_uid(net, NULL);
2288	fl4.flowi4_multipath_hash = 0;
2289
2290	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2291		flkeys = &_flkeys;
2292	} else {
2293		fl4.flowi4_proto = 0;
2294		fl4.fl4_sport = 0;
2295		fl4.fl4_dport = 0;
2296	}
2297
2298	err = fib_lookup(net, &fl4, res, 0);
2299	if (err != 0) {
2300		if (!IN_DEV_FORWARD(in_dev))
2301			err = -EHOSTUNREACH;
2302		goto no_route;
2303	}
2304
2305	if (res->type == RTN_BROADCAST) {
2306		if (IN_DEV_BFORWARD(in_dev))
2307			goto make_route;
2308		/* not do cache if bc_forwarding is enabled */
2309		if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
2310			do_cache = false;
2311		goto brd_input;
2312	}
2313
2314	if (res->type == RTN_LOCAL) {
2315		err = fib_validate_source(skb, saddr, daddr, tos,
2316					  0, dev, in_dev, &itag);
 
2317		if (err < 0)
2318			goto martian_source;
 
 
 
2319		goto local_input;
2320	}
2321
2322	if (!IN_DEV_FORWARD(in_dev)) {
2323		err = -EHOSTUNREACH;
2324		goto no_route;
2325	}
2326	if (res->type != RTN_UNICAST)
2327		goto martian_destination;
2328
2329make_route:
2330	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2331out:	return err;
2332
2333brd_input:
2334	if (skb->protocol != htons(ETH_P_IP))
2335		goto e_inval;
2336
2337	if (!ipv4_is_zeronet(saddr)) {
2338		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2339					  in_dev, &itag);
 
 
2340		if (err < 0)
2341			goto martian_source;
 
 
2342	}
2343	flags |= RTCF_BROADCAST;
2344	res->type = RTN_BROADCAST;
2345	RT_CACHE_STAT_INC(in_brd);
2346
2347local_input:
2348	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
2349		IPCB(skb)->flags |= IPSKB_NOPOLICY;
2350
2351	do_cache &= res->fi && !itag;
2352	if (do_cache) {
2353		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2354
2355		rth = rcu_dereference(nhc->nhc_rth_input);
2356		if (rt_cache_valid(rth)) {
2357			skb_dst_set_noref(skb, &rth->dst);
2358			err = 0;
2359			goto out;
2360		}
2361	}
2362
2363	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2364			   flags | RTCF_LOCAL, res->type, false);
2365	if (!rth)
2366		goto e_nobufs;
2367
 
2368	rth->dst.output= ip_rt_bug;
2369#ifdef CONFIG_IP_ROUTE_CLASSID
2370	rth->dst.tclassid = itag;
2371#endif
2372	rth->rt_is_input = 1;
2373
2374	RT_CACHE_STAT_INC(in_slow_tot);
2375	if (res->type == RTN_UNREACHABLE) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2376		rth->dst.input= ip_error;
2377		rth->dst.error= -err;
2378		rth->rt_flags	&= ~RTCF_LOCAL;
2379	}
2380
2381	if (do_cache) {
2382		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2383
2384		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2385		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2386			WARN_ON(rth->dst.input == lwtunnel_input);
2387			rth->dst.lwtstate->orig_input = rth->dst.input;
2388			rth->dst.input = lwtunnel_input;
2389		}
2390
2391		if (unlikely(!rt_cache_route(nhc, rth)))
2392			rt_add_uncached_list(rth);
2393	}
2394	skb_dst_set(skb, &rth->dst);
 
2395	err = 0;
 
 
2396	goto out;
2397
2398no_route:
2399	RT_CACHE_STAT_INC(in_no_route);
2400	res->type = RTN_UNREACHABLE;
2401	res->fi = NULL;
2402	res->table = NULL;
 
2403	goto local_input;
2404
2405	/*
2406	 *	Do not cache martian addresses: they should be logged (RFC1812)
2407	 */
2408martian_destination:
2409	RT_CACHE_STAT_INC(in_martian_dst);
2410#ifdef CONFIG_IP_ROUTE_VERBOSE
2411	if (IN_DEV_LOG_MARTIANS(in_dev))
2412		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2413				     &daddr, &saddr, dev->name);
2414#endif
2415
 
 
 
 
2416e_inval:
2417	err = -EINVAL;
2418	goto out;
2419
2420e_nobufs:
2421	err = -ENOBUFS;
2422	goto out;
2423
2424martian_source:
 
 
2425	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2426	goto out;
2427}
2428
2429/* called with rcu_read_lock held */
2430static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2431			      u8 tos, struct net_device *dev, struct fib_result *res)
2432{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2433	/* Multicast recognition logic is moved from route cache to here.
2434	 * The problem was that too many Ethernet cards have broken/missing
2435	 * hardware multicast filters :-( As result the host on multicasting
2436	 * network acquires a lot of useless route cache entries, sort of
2437	 * SDR messages from all the world. Now we try to get rid of them.
2438	 * Really, provided software IP multicast filter is organized
2439	 * reasonably (at least, hashed), it does not result in a slowdown
2440	 * comparing with route cache reject entries.
2441	 * Note, that multicast routers are not affected, because
2442	 * route cache entry is created eventually.
2443	 */
2444	if (ipv4_is_multicast(daddr)) {
2445		struct in_device *in_dev = __in_dev_get_rcu(dev);
2446		int our = 0;
2447		int err = -EINVAL;
2448
2449		if (!in_dev)
2450			return err;
2451		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2452				      ip_hdr(skb)->protocol);
2453
2454		/* check l3 master if no match yet */
2455		if (!our && netif_is_l3_slave(dev)) {
2456			struct in_device *l3_in_dev;
2457
2458			l3_in_dev = __in_dev_get_rcu(skb->dev);
2459			if (l3_in_dev)
2460				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2461						      ip_hdr(skb)->protocol);
2462		}
2463
2464		if (our
2465#ifdef CONFIG_IP_MROUTE
2466			||
2467		    (!ipv4_is_local_multicast(daddr) &&
2468		     IN_DEV_MFORWARD(in_dev))
2469#endif
2470		   ) {
2471			err = ip_route_input_mc(skb, daddr, saddr,
2472						tos, dev, our);
 
 
 
2473		}
2474		return err;
 
2475	}
2476
2477	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2478}
2479
2480int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2481			 u8 tos, struct net_device *dev)
2482{
2483	struct fib_result res;
2484	int err;
2485
2486	tos &= IPTOS_RT_MASK;
2487	rcu_read_lock();
2488	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2489	rcu_read_unlock();
2490
2491	return err;
2492}
2493EXPORT_SYMBOL(ip_route_input_noref);
2494
2495/* called with rcu_read_lock() */
2496static struct rtable *__mkroute_output(const struct fib_result *res,
2497				       const struct flowi4 *fl4, int orig_oif,
 
 
2498				       struct net_device *dev_out,
2499				       unsigned int flags)
2500{
2501	struct fib_info *fi = res->fi;
2502	struct fib_nh_exception *fnhe;
2503	struct in_device *in_dev;
2504	u16 type = res->type;
2505	struct rtable *rth;
2506	bool do_cache;
2507
2508	in_dev = __in_dev_get_rcu(dev_out);
2509	if (!in_dev)
2510		return ERR_PTR(-EINVAL);
2511
2512	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2513		if (ipv4_is_loopback(fl4->saddr) &&
2514		    !(dev_out->flags & IFF_LOOPBACK) &&
2515		    !netif_is_l3_master(dev_out))
2516			return ERR_PTR(-EINVAL);
2517
2518	if (ipv4_is_lbcast(fl4->daddr))
2519		type = RTN_BROADCAST;
2520	else if (ipv4_is_multicast(fl4->daddr))
2521		type = RTN_MULTICAST;
2522	else if (ipv4_is_zeronet(fl4->daddr))
2523		return ERR_PTR(-EINVAL);
2524
2525	if (dev_out->flags & IFF_LOOPBACK)
2526		flags |= RTCF_LOCAL;
2527
2528	do_cache = true;
 
 
 
2529	if (type == RTN_BROADCAST) {
2530		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2531		fi = NULL;
2532	} else if (type == RTN_MULTICAST) {
2533		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2534		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2535				     fl4->flowi4_proto))
2536			flags &= ~RTCF_LOCAL;
2537		else
2538			do_cache = false;
2539		/* If multicast route do not exist use
2540		 * default one, but do not gateway in this case.
2541		 * Yes, it is hack.
2542		 */
2543		if (fi && res->prefixlen < 4)
2544			fi = NULL;
2545	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2546		   (orig_oif != dev_out->ifindex)) {
2547		/* For local routes that require a particular output interface
2548		 * we do not want to cache the result.  Caching the result
2549		 * causes incorrect behaviour when there are multiple source
2550		 * addresses on the interface, the end result being that if the
2551		 * intended recipient is waiting on that interface for the
2552		 * packet he won't receive it because it will be delivered on
2553		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2554		 * be set to the loopback interface as well.
2555		 */
2556		do_cache = false;
2557	}
2558
2559	fnhe = NULL;
2560	do_cache &= fi != NULL;
2561	if (fi) {
2562		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2563		struct rtable __rcu **prth;
2564
2565		fnhe = find_exception(nhc, fl4->daddr);
2566		if (!do_cache)
2567			goto add;
2568		if (fnhe) {
2569			prth = &fnhe->fnhe_rth_output;
2570		} else {
2571			if (unlikely(fl4->flowi4_flags &
2572				     FLOWI_FLAG_KNOWN_NH &&
2573				     !(nhc->nhc_gw_family &&
2574				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2575				do_cache = false;
2576				goto add;
2577			}
2578			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2579		}
2580		rth = rcu_dereference(*prth);
2581		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2582			return rth;
2583	}
2584
2585add:
2586	rth = rt_dst_alloc(dev_out, flags, type,
2587			   IN_DEV_ORCONF(in_dev, NOXFRM));
2588	if (!rth)
2589		return ERR_PTR(-ENOBUFS);
2590
2591	rth->rt_iif = orig_oif;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2592
2593	RT_CACHE_STAT_INC(out_slow_tot);
2594
 
 
 
 
2595	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
 
2596		if (flags & RTCF_LOCAL &&
2597		    !(dev_out->flags & IFF_LOOPBACK)) {
2598			rth->dst.output = ip_mc_output;
2599			RT_CACHE_STAT_INC(out_slow_mc);
2600		}
2601#ifdef CONFIG_IP_MROUTE
2602		if (type == RTN_MULTICAST) {
2603			if (IN_DEV_MFORWARD(in_dev) &&
2604			    !ipv4_is_local_multicast(fl4->daddr)) {
2605				rth->dst.input = ip_mr_input;
2606				rth->dst.output = ip_mc_output;
2607			}
2608		}
2609#endif
2610	}
2611
2612	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2613	lwtunnel_set_redirect(&rth->dst);
2614
2615	return rth;
2616}
2617
2618/*
2619 * Major route resolver routine.
 
2620 */
2621
2622struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2623					const struct sk_buff *skb)
2624{
2625	struct fib_result res = {
2626		.type		= RTN_UNSPEC,
2627		.fi		= NULL,
2628		.table		= NULL,
2629		.tclassid	= 0,
2630	};
2631	struct rtable *rth;
 
 
 
2632
2633	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2634	ip_rt_fix_tos(fl4);
 
 
2635
2636	rcu_read_lock();
2637	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2638	rcu_read_unlock();
2639
2640	return rth;
2641}
2642EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2643
2644struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2645					    struct fib_result *res,
2646					    const struct sk_buff *skb)
2647{
2648	struct net_device *dev_out = NULL;
2649	int orig_oif = fl4->flowi4_oif;
2650	unsigned int flags = 0;
2651	struct rtable *rth;
2652	int err;
2653
 
2654	if (fl4->saddr) {
 
2655		if (ipv4_is_multicast(fl4->saddr) ||
2656		    ipv4_is_lbcast(fl4->saddr) ||
2657		    ipv4_is_zeronet(fl4->saddr)) {
2658			rth = ERR_PTR(-EINVAL);
2659			goto out;
2660		}
2661
2662		rth = ERR_PTR(-ENETUNREACH);
2663
2664		/* I removed check for oif == dev_out->oif here.
2665		 * It was wrong for two reasons:
2666		 * 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
2667		 *    is assigned to multiple interfaces.
2668		 * 2. Moreover, we are allowed to send packets with saddr
2669		 *    of another iface. --ANK
2670		 */
2671
2672		if (fl4->flowi4_oif == 0 &&
2673		    (ipv4_is_multicast(fl4->daddr) ||
2674		     ipv4_is_lbcast(fl4->daddr))) {
2675			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2676			dev_out = __ip_dev_find(net, fl4->saddr, false);
2677			if (!dev_out)
2678				goto out;
2679
2680			/* Special hack: user can direct multicasts
2681			 * and limited broadcast via necessary interface
2682			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2683			 * This hack is not just for fun, it allows
2684			 * vic,vat and friends to work.
2685			 * They bind socket to loopback, set ttl to zero
2686			 * and expect that it will work.
2687			 * From the viewpoint of routing cache they are broken,
2688			 * because we are not allowed to build multicast path
2689			 * with loopback source addr (look, routing cache
2690			 * cannot know, that ttl is zero, so that packet
2691			 * will not leave this host and route is valid).
2692			 * Luckily, this hack is good workaround.
2693			 */
2694
2695			fl4->flowi4_oif = dev_out->ifindex;
2696			goto make_route;
2697		}
2698
2699		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2700			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2701			if (!__ip_dev_find(net, fl4->saddr, false))
2702				goto out;
2703		}
2704	}
2705
2706
2707	if (fl4->flowi4_oif) {
2708		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2709		rth = ERR_PTR(-ENODEV);
2710		if (!dev_out)
2711			goto out;
2712
2713		/* RACE: Check return value of inet_select_addr instead. */
2714		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2715			rth = ERR_PTR(-ENETUNREACH);
2716			goto out;
2717		}
2718		if (ipv4_is_local_multicast(fl4->daddr) ||
2719		    ipv4_is_lbcast(fl4->daddr) ||
2720		    fl4->flowi4_proto == IPPROTO_IGMP) {
2721			if (!fl4->saddr)
2722				fl4->saddr = inet_select_addr(dev_out, 0,
2723							      RT_SCOPE_LINK);
2724			goto make_route;
2725		}
2726		if (!fl4->saddr) {
2727			if (ipv4_is_multicast(fl4->daddr))
2728				fl4->saddr = inet_select_addr(dev_out, 0,
2729							      fl4->flowi4_scope);
2730			else if (!fl4->daddr)
2731				fl4->saddr = inet_select_addr(dev_out, 0,
2732							      RT_SCOPE_HOST);
2733		}
2734	}
2735
2736	if (!fl4->daddr) {
2737		fl4->daddr = fl4->saddr;
2738		if (!fl4->daddr)
2739			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2740		dev_out = net->loopback_dev;
2741		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2742		res->type = RTN_LOCAL;
2743		flags |= RTCF_LOCAL;
2744		goto make_route;
2745	}
2746
2747	err = fib_lookup(net, fl4, res, 0);
2748	if (err) {
2749		res->fi = NULL;
2750		res->table = NULL;
2751		if (fl4->flowi4_oif &&
2752		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2753			/* Apparently, routing tables are wrong. Assume,
2754			 * that the destination is on link.
2755			 *
2756			 * WHY? DW.
2757			 * Because we are allowed to send to iface
2758			 * even if it has NO routes and NO assigned
2759			 * addresses. When oif is specified, routing
2760			 * tables are looked up with only one purpose:
2761			 * to catch if destination is gatewayed, rather than
2762			 * direct. Moreover, if MSG_DONTROUTE is set,
2763			 * we send packet, ignoring both routing tables
2764			 * and ifaddr state. --ANK
2765			 *
2766			 *
2767			 * We could make it even if oif is unknown,
2768			 * likely IPv6, but we do not.
2769			 */
2770
2771			if (fl4->saddr == 0)
2772				fl4->saddr = inet_select_addr(dev_out, 0,
2773							      RT_SCOPE_LINK);
2774			res->type = RTN_UNICAST;
2775			goto make_route;
2776		}
2777		rth = ERR_PTR(err);
2778		goto out;
2779	}
2780
2781	if (res->type == RTN_LOCAL) {
2782		if (!fl4->saddr) {
2783			if (res->fi->fib_prefsrc)
2784				fl4->saddr = res->fi->fib_prefsrc;
2785			else
2786				fl4->saddr = fl4->daddr;
2787		}
2788
2789		/* L3 master device is the loopback for that domain */
2790		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2791			net->loopback_dev;
2792
2793		/* make sure orig_oif points to fib result device even
2794		 * though packet rx/tx happens over loopback or l3mdev
2795		 */
2796		orig_oif = FIB_RES_OIF(*res);
2797
2798		fl4->flowi4_oif = dev_out->ifindex;
 
2799		flags |= RTCF_LOCAL;
2800		goto make_route;
2801	}
2802
2803	fib_select_path(net, res, fl4, skb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2804
2805	dev_out = FIB_RES_DEV(*res);
2806
2807make_route:
2808	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
 
 
 
 
 
 
 
 
2809
2810out:
 
2811	return rth;
2812}
2813
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2814static struct dst_ops ipv4_dst_blackhole_ops = {
2815	.family			= AF_INET,
2816	.default_advmss		= ipv4_default_advmss,
2817	.neigh_lookup		= ipv4_neigh_lookup,
2818	.check			= dst_blackhole_check,
2819	.cow_metrics		= dst_blackhole_cow_metrics,
2820	.update_pmtu		= dst_blackhole_update_pmtu,
2821	.redirect		= dst_blackhole_redirect,
2822	.mtu			= dst_blackhole_mtu,
 
2823};
2824
2825struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2826{
2827	struct rtable *ort = dst_rtable(dst_orig);
2828	struct rtable *rt;
2829
2830	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
2831	if (rt) {
2832		struct dst_entry *new = &rt->dst;
2833
2834		new->__use = 1;
2835		new->input = dst_discard;
2836		new->output = dst_discard_out;
 
2837
2838		new->dev = net->loopback_dev;
2839		netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
2840
2841		rt->rt_is_input = ort->rt_is_input;
 
 
 
 
2842		rt->rt_iif = ort->rt_iif;
2843		rt->rt_pmtu = ort->rt_pmtu;
2844		rt->rt_mtu_locked = ort->rt_mtu_locked;
2845
2846		rt->rt_genid = rt_genid_ipv4(net);
2847		rt->rt_flags = ort->rt_flags;
2848		rt->rt_type = ort->rt_type;
2849		rt->rt_uses_gateway = ort->rt_uses_gateway;
2850		rt->rt_gw_family = ort->rt_gw_family;
2851		if (rt->rt_gw_family == AF_INET)
2852			rt->rt_gw4 = ort->rt_gw4;
2853		else if (rt->rt_gw_family == AF_INET6)
2854			rt->rt_gw6 = ort->rt_gw6;
 
 
 
 
 
 
2855	}
2856
2857	dst_release(dst_orig);
2858
2859	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2860}
2861
2862struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2863				    const struct sock *sk)
2864{
2865	struct rtable *rt = __ip_route_output_key(net, flp4);
2866
2867	if (IS_ERR(rt))
2868		return rt;
2869
2870	if (flp4->flowi4_proto) {
2871		flp4->flowi4_oif = rt->dst.dev->ifindex;
2872		rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
2873						  flowi4_to_flowi(flp4),
2874						  sk, 0));
2875	}
2876
2877	return rt;
2878}
2879EXPORT_SYMBOL_GPL(ip_route_output_flow);
2880
2881/* called with rcu_read_lock held */
2882static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2883			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2884			struct sk_buff *skb, u32 portid, u32 seq,
2885			unsigned int flags)
2886{
 
2887	struct rtmsg *r;
2888	struct nlmsghdr *nlh;
2889	unsigned long expires = 0;
2890	u32 error;
2891	u32 metrics[RTAX_MAX];
2892
2893	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2894	if (!nlh)
2895		return -EMSGSIZE;
2896
2897	r = nlmsg_data(nlh);
2898	r->rtm_family	 = AF_INET;
2899	r->rtm_dst_len	= 32;
2900	r->rtm_src_len	= 0;
2901	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
2902	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2903	if (nla_put_u32(skb, RTA_TABLE, table_id))
2904		goto nla_put_failure;
2905	r->rtm_type	= rt->rt_type;
2906	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2907	r->rtm_protocol = RTPROT_UNSPEC;
2908	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2909	if (rt->rt_flags & RTCF_NOTIFY)
2910		r->rtm_flags |= RTM_F_NOTIFY;
2911	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2912		r->rtm_flags |= RTCF_DOREDIRECT;
2913
2914	if (nla_put_in_addr(skb, RTA_DST, dst))
2915		goto nla_put_failure;
2916	if (src) {
2917		r->rtm_src_len = 32;
2918		if (nla_put_in_addr(skb, RTA_SRC, src))
2919			goto nla_put_failure;
2920	}
2921	if (rt->dst.dev &&
2922	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2923		goto nla_put_failure;
2924	if (rt->dst.lwtstate &&
2925	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2926		goto nla_put_failure;
2927#ifdef CONFIG_IP_ROUTE_CLASSID
2928	if (rt->dst.tclassid &&
2929	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2930		goto nla_put_failure;
2931#endif
2932	if (fl4 && !rt_is_input_route(rt) &&
2933	    fl4->saddr != src) {
2934		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2935			goto nla_put_failure;
2936	}
2937	if (rt->rt_uses_gateway) {
2938		if (rt->rt_gw_family == AF_INET &&
2939		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2940			goto nla_put_failure;
2941		} else if (rt->rt_gw_family == AF_INET6) {
2942			int alen = sizeof(struct in6_addr);
2943			struct nlattr *nla;
2944			struct rtvia *via;
2945
2946			nla = nla_reserve(skb, RTA_VIA, alen + 2);
2947			if (!nla)
2948				goto nla_put_failure;
2949
2950			via = nla_data(nla);
2951			via->rtvia_family = AF_INET6;
2952			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2953		}
2954	}
 
 
 
2955
2956	expires = rt->dst.expires;
2957	if (expires) {
2958		unsigned long now = jiffies;
2959
2960		if (time_before(now, expires))
2961			expires -= now;
2962		else
2963			expires = 0;
2964	}
2965
2966	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2967	if (rt->rt_pmtu && expires)
2968		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2969	if (rt->rt_mtu_locked && expires)
2970		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2971	if (rtnetlink_put_metrics(skb, metrics) < 0)
2972		goto nla_put_failure;
2973
2974	if (fl4) {
2975		if (fl4->flowi4_mark &&
2976		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2977			goto nla_put_failure;
 
 
 
 
 
 
 
 
 
 
 
 
2978
2979		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2980		    nla_put_u32(skb, RTA_UID,
2981				from_kuid_munged(current_user_ns(),
2982						 fl4->flowi4_uid)))
2983			goto nla_put_failure;
2984
2985		if (rt_is_input_route(rt)) {
2986#ifdef CONFIG_IP_MROUTE
2987			if (ipv4_is_multicast(dst) &&
2988			    !ipv4_is_local_multicast(dst) &&
2989			    IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
2990				int err = ipmr_get_route(net, skb,
2991							 fl4->saddr, fl4->daddr,
2992							 r, portid);
2993
2994				if (err <= 0) {
 
 
 
 
 
 
2995					if (err == 0)
2996						return 0;
2997					goto nla_put_failure;
 
 
 
 
2998				}
2999			} else
 
3000#endif
3001				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3002					goto nla_put_failure;
3003		}
3004	}
3005
3006	error = rt->dst.error;
3007
3008	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3009		goto nla_put_failure;
3010
3011	nlmsg_end(skb, nlh);
3012	return 0;
3013
3014nla_put_failure:
3015	nlmsg_cancel(skb, nlh);
3016	return -EMSGSIZE;
3017}
3018
3019static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3020			    struct netlink_callback *cb, u32 table_id,
3021			    struct fnhe_hash_bucket *bucket, int genid,
3022			    int *fa_index, int fa_start, unsigned int flags)
3023{
3024	int i;
3025
3026	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3027		struct fib_nh_exception *fnhe;
3028
3029		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3030		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3031			struct rtable *rt;
3032			int err;
3033
3034			if (*fa_index < fa_start)
3035				goto next;
3036
3037			if (fnhe->fnhe_genid != genid)
3038				goto next;
3039
3040			if (fnhe->fnhe_expires &&
3041			    time_after(jiffies, fnhe->fnhe_expires))
3042				goto next;
3043
3044			rt = rcu_dereference(fnhe->fnhe_rth_input);
3045			if (!rt)
3046				rt = rcu_dereference(fnhe->fnhe_rth_output);
3047			if (!rt)
3048				goto next;
3049
3050			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3051					   table_id, NULL, skb,
3052					   NETLINK_CB(cb->skb).portid,
3053					   cb->nlh->nlmsg_seq, flags);
3054			if (err)
3055				return err;
3056next:
3057			(*fa_index)++;
3058		}
3059	}
3060
3061	return 0;
3062}
3063
3064int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3065		       u32 table_id, struct fib_info *fi,
3066		       int *fa_index, int fa_start, unsigned int flags)
3067{
3068	struct net *net = sock_net(cb->skb->sk);
3069	int nhsel, genid = fnhe_genid(net);
3070
3071	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3072		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3073		struct fnhe_hash_bucket *bucket;
3074		int err;
3075
3076		if (nhc->nhc_flags & RTNH_F_DEAD)
3077			continue;
3078
3079		rcu_read_lock();
3080		bucket = rcu_dereference(nhc->nhc_exceptions);
3081		err = 0;
3082		if (bucket)
3083			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3084					       genid, fa_index, fa_start,
3085					       flags);
3086		rcu_read_unlock();
3087		if (err)
3088			return err;
3089	}
3090
3091	return 0;
3092}
3093
3094static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3095						   u8 ip_proto, __be16 sport,
3096						   __be16 dport)
3097{
3098	struct sk_buff *skb;
3099	struct iphdr *iph;
3100
3101	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3102	if (!skb)
3103		return NULL;
3104
3105	/* Reserve room for dummy headers, this skb can pass
3106	 * through good chunk of routing engine.
3107	 */
3108	skb_reset_mac_header(skb);
3109	skb_reset_network_header(skb);
3110	skb->protocol = htons(ETH_P_IP);
3111	iph = skb_put(skb, sizeof(struct iphdr));
3112	iph->protocol = ip_proto;
3113	iph->saddr = src;
3114	iph->daddr = dst;
3115	iph->version = 0x4;
3116	iph->frag_off = 0;
3117	iph->ihl = 0x5;
3118	skb_set_transport_header(skb, skb->len);
3119
3120	switch (iph->protocol) {
3121	case IPPROTO_UDP: {
3122		struct udphdr *udph;
3123
3124		udph = skb_put_zero(skb, sizeof(struct udphdr));
3125		udph->source = sport;
3126		udph->dest = dport;
3127		udph->len = htons(sizeof(struct udphdr));
3128		udph->check = 0;
3129		break;
3130	}
3131	case IPPROTO_TCP: {
3132		struct tcphdr *tcph;
3133
3134		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3135		tcph->source	= sport;
3136		tcph->dest	= dport;
3137		tcph->doff	= sizeof(struct tcphdr) / 4;
3138		tcph->rst = 1;
3139		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3140					    src, dst, 0);
3141		break;
3142	}
3143	case IPPROTO_ICMP: {
3144		struct icmphdr *icmph;
3145
3146		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3147		icmph->type = ICMP_ECHO;
3148		icmph->code = 0;
3149	}
3150	}
3151
3152	return skb;
3153}
3154
3155static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3156				       const struct nlmsghdr *nlh,
3157				       struct nlattr **tb,
3158				       struct netlink_ext_ack *extack)
3159{
 
3160	struct rtmsg *rtm;
3161	int i, err;
3162
3163	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3164		NL_SET_ERR_MSG(extack,
3165			       "ipv4: Invalid header for route get request");
3166		return -EINVAL;
3167	}
3168
3169	if (!netlink_strict_get_check(skb))
3170		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3171					      rtm_ipv4_policy, extack);
3172
3173	rtm = nlmsg_data(nlh);
3174	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3175	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3176	    rtm->rtm_table || rtm->rtm_protocol ||
3177	    rtm->rtm_scope || rtm->rtm_type) {
3178		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3179		return -EINVAL;
3180	}
3181
3182	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3183			       RTM_F_LOOKUP_TABLE |
3184			       RTM_F_FIB_MATCH)) {
3185		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3186		return -EINVAL;
3187	}
3188
3189	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3190					    rtm_ipv4_policy, extack);
3191	if (err)
3192		return err;
3193
3194	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3195	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3196		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3197		return -EINVAL;
3198	}
3199
3200	for (i = 0; i <= RTA_MAX; i++) {
3201		if (!tb[i])
3202			continue;
3203
3204		switch (i) {
3205		case RTA_IIF:
3206		case RTA_OIF:
3207		case RTA_SRC:
3208		case RTA_DST:
3209		case RTA_IP_PROTO:
3210		case RTA_SPORT:
3211		case RTA_DPORT:
3212		case RTA_MARK:
3213		case RTA_UID:
3214			break;
3215		default:
3216			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3217			return -EINVAL;
3218		}
3219	}
3220
3221	return 0;
3222}
3223
3224static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3225			     struct netlink_ext_ack *extack)
3226{
3227	struct net *net = sock_net(in_skb->sk);
3228	struct nlattr *tb[RTA_MAX+1];
3229	u32 table_id = RT_TABLE_MAIN;
3230	__be16 sport = 0, dport = 0;
3231	struct fib_result res = {};
3232	u8 ip_proto = IPPROTO_UDP;
3233	struct rtable *rt = NULL;
3234	struct sk_buff *skb;
3235	struct rtmsg *rtm;
3236	struct flowi4 fl4 = {};
3237	__be32 dst = 0;
3238	__be32 src = 0;
3239	kuid_t uid;
3240	u32 iif;
3241	int err;
3242	int mark;
 
3243
3244	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3245	if (err < 0)
3246		return err;
3247
3248	rtm = nlmsg_data(nlh);
3249	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3250	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3251	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3252	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3253	if (tb[RTA_UID])
3254		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3255	else
3256		uid = (iif ? INVALID_UID : current_uid());
3257
3258	if (tb[RTA_IP_PROTO]) {
3259		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3260						  &ip_proto, AF_INET, extack);
3261		if (err)
3262			return err;
3263	}
3264
3265	if (tb[RTA_SPORT])
3266		sport = nla_get_be16(tb[RTA_SPORT]);
 
 
 
3267
3268	if (tb[RTA_DPORT])
3269		dport = nla_get_be16(tb[RTA_DPORT]);
 
3270
3271	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3272	if (!skb)
3273		return -ENOBUFS;
3274
3275	fl4.daddr = dst;
3276	fl4.saddr = src;
3277	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3278	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3279	fl4.flowi4_mark = mark;
3280	fl4.flowi4_uid = uid;
3281	if (sport)
3282		fl4.fl4_sport = sport;
3283	if (dport)
3284		fl4.fl4_dport = dport;
3285	fl4.flowi4_proto = ip_proto;
3286
3287	rcu_read_lock();
3288
3289	if (iif) {
3290		struct net_device *dev;
3291
3292		dev = dev_get_by_index_rcu(net, iif);
3293		if (!dev) {
3294			err = -ENODEV;
3295			goto errout_rcu;
3296		}
3297
3298		fl4.flowi4_iif = iif; /* for rt_fill_info */
3299		skb->dev	= dev;
3300		skb->mark	= mark;
3301		err = ip_route_input_rcu(skb, dst, src,
3302					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3303					 &res);
3304
3305		rt = skb_rtable(skb);
3306		if (err == 0 && rt->dst.error)
3307			err = -rt->dst.error;
3308	} else {
3309		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3310		skb->dev = net->loopback_dev;
3311		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
 
 
 
 
 
 
3312		err = 0;
3313		if (IS_ERR(rt))
3314			err = PTR_ERR(rt);
3315		else
3316			skb_dst_set(skb, &rt->dst);
3317	}
3318
3319	if (err)
3320		goto errout_rcu;
3321
 
3322	if (rtm->rtm_flags & RTM_F_NOTIFY)
3323		rt->rt_flags |= RTCF_NOTIFY;
3324
3325	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3326		table_id = res.table ? res.table->tb_id : 0;
 
 
 
 
 
 
 
 
 
 
 
3327
3328	/* reset skb for netlink reply msg */
3329	skb_trim(skb, 0);
3330	skb_reset_network_header(skb);
3331	skb_reset_transport_header(skb);
3332	skb_reset_mac_header(skb);
 
3333
3334	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3335		struct fib_rt_info fri;
3336
3337		if (!res.fi) {
3338			err = fib_props[res.type].error;
3339			if (!err)
3340				err = -EHOSTUNREACH;
3341			goto errout_rcu;
3342		}
3343		fri.fi = res.fi;
3344		fri.tb_id = table_id;
3345		fri.dst = res.prefix;
3346		fri.dst_len = res.prefixlen;
3347		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
3348		fri.type = rt->rt_type;
3349		fri.offload = 0;
3350		fri.trap = 0;
3351		fri.offload_failed = 0;
3352		if (res.fa_head) {
3353			struct fib_alias *fa;
3354
3355			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3356				u8 slen = 32 - fri.dst_len;
3357
3358				if (fa->fa_slen == slen &&
3359				    fa->tb_id == fri.tb_id &&
3360				    fa->fa_dscp == fri.dscp &&
3361				    fa->fa_info == res.fi &&
3362				    fa->fa_type == fri.type) {
3363					fri.offload = READ_ONCE(fa->offload);
3364					fri.trap = READ_ONCE(fa->trap);
3365					fri.offload_failed =
3366						READ_ONCE(fa->offload_failed);
3367					break;
3368				}
3369			}
 
3370		}
3371		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3372				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3373	} else {
3374		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3375				   NETLINK_CB(in_skb).portid,
3376				   nlh->nlmsg_seq, 0);
3377	}
3378	if (err < 0)
3379		goto errout_rcu;
3380
3381	rcu_read_unlock();
3382
3383	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3384
3385errout_free:
3386	return err;
3387errout_rcu:
3388	rcu_read_unlock();
3389	kfree_skb(skb);
3390	goto errout_free;
3391}
3392
3393void ip_rt_multicast_event(struct in_device *in_dev)
3394{
3395	rt_cache_flush(dev_net(in_dev->dev));
3396}
3397
3398#ifdef CONFIG_SYSCTL
3399static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3400static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3401static int ip_rt_gc_elasticity __read_mostly	= 8;
3402static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3403
3404static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3405		void *buffer, size_t *lenp, loff_t *ppos)
3406{
3407	struct net *net = (struct net *)__ctl->extra1;
 
 
 
 
 
 
 
3408
3409	if (write) {
3410		rt_cache_flush(net);
3411		fnhe_genid_bump(net);
3412		return 0;
3413	}
3414
3415	return -EINVAL;
3416}
3417
3418static struct ctl_table ipv4_route_table[] = {
3419	{
3420		.procname	= "gc_thresh",
3421		.data		= &ipv4_dst_ops.gc_thresh,
3422		.maxlen		= sizeof(int),
3423		.mode		= 0644,
3424		.proc_handler	= proc_dointvec,
3425	},
3426	{
3427		.procname	= "max_size",
3428		.data		= &ip_rt_max_size,
3429		.maxlen		= sizeof(int),
3430		.mode		= 0644,
3431		.proc_handler	= proc_dointvec,
3432	},
3433	{
3434		/*  Deprecated. Use gc_min_interval_ms */
3435
3436		.procname	= "gc_min_interval",
3437		.data		= &ip_rt_gc_min_interval,
3438		.maxlen		= sizeof(int),
3439		.mode		= 0644,
3440		.proc_handler	= proc_dointvec_jiffies,
3441	},
3442	{
3443		.procname	= "gc_min_interval_ms",
3444		.data		= &ip_rt_gc_min_interval,
3445		.maxlen		= sizeof(int),
3446		.mode		= 0644,
3447		.proc_handler	= proc_dointvec_ms_jiffies,
3448	},
3449	{
3450		.procname	= "gc_timeout",
3451		.data		= &ip_rt_gc_timeout,
3452		.maxlen		= sizeof(int),
3453		.mode		= 0644,
3454		.proc_handler	= proc_dointvec_jiffies,
3455	},
3456	{
3457		.procname	= "gc_interval",
3458		.data		= &ip_rt_gc_interval,
3459		.maxlen		= sizeof(int),
3460		.mode		= 0644,
3461		.proc_handler	= proc_dointvec_jiffies,
3462	},
3463	{
3464		.procname	= "redirect_load",
3465		.data		= &ip_rt_redirect_load,
3466		.maxlen		= sizeof(int),
3467		.mode		= 0644,
3468		.proc_handler	= proc_dointvec,
3469	},
3470	{
3471		.procname	= "redirect_number",
3472		.data		= &ip_rt_redirect_number,
3473		.maxlen		= sizeof(int),
3474		.mode		= 0644,
3475		.proc_handler	= proc_dointvec,
3476	},
3477	{
3478		.procname	= "redirect_silence",
3479		.data		= &ip_rt_redirect_silence,
3480		.maxlen		= sizeof(int),
3481		.mode		= 0644,
3482		.proc_handler	= proc_dointvec,
3483	},
3484	{
3485		.procname	= "error_cost",
3486		.data		= &ip_rt_error_cost,
3487		.maxlen		= sizeof(int),
3488		.mode		= 0644,
3489		.proc_handler	= proc_dointvec,
3490	},
3491	{
3492		.procname	= "error_burst",
3493		.data		= &ip_rt_error_burst,
3494		.maxlen		= sizeof(int),
3495		.mode		= 0644,
3496		.proc_handler	= proc_dointvec,
3497	},
3498	{
3499		.procname	= "gc_elasticity",
3500		.data		= &ip_rt_gc_elasticity,
3501		.maxlen		= sizeof(int),
3502		.mode		= 0644,
3503		.proc_handler	= proc_dointvec,
3504	},
3505	{ }
3506};
3507
3508static const char ipv4_route_flush_procname[] = "flush";
3509
3510static struct ctl_table ipv4_route_netns_table[] = {
3511	{
3512		.procname	= ipv4_route_flush_procname,
 
3513		.maxlen		= sizeof(int),
3514		.mode		= 0200,
3515		.proc_handler	= ipv4_sysctl_rtcache_flush,
3516	},
3517	{
3518		.procname       = "min_pmtu",
3519		.data           = &init_net.ipv4.ip_rt_min_pmtu,
3520		.maxlen         = sizeof(int),
3521		.mode           = 0644,
3522		.proc_handler   = proc_dointvec_minmax,
3523		.extra1         = &ip_min_valid_pmtu,
3524	},
3525	{
3526		.procname       = "mtu_expires",
3527		.data           = &init_net.ipv4.ip_rt_mtu_expires,
3528		.maxlen         = sizeof(int),
3529		.mode           = 0644,
3530		.proc_handler   = proc_dointvec_jiffies,
3531	},
 
 
 
 
3532	{
3533		.procname   = "min_adv_mss",
3534		.data       = &init_net.ipv4.ip_rt_min_advmss,
3535		.maxlen     = sizeof(int),
3536		.mode       = 0644,
3537		.proc_handler   = proc_dointvec,
3538	},
3539	{ },
3540};
3541
3542static __net_init int sysctl_route_net_init(struct net *net)
3543{
3544	struct ctl_table *tbl;
3545	size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);
3546
3547	tbl = ipv4_route_netns_table;
3548	if (!net_eq(net, &init_net)) {
3549		int i;
3550
3551		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3552		if (!tbl)
3553			goto err_dup;
3554
3555		/* Don't export non-whitelisted sysctls to unprivileged users */
3556		if (net->user_ns != &init_user_ns) {
3557			if (tbl[0].procname != ipv4_route_flush_procname) {
3558				tbl[0].procname = NULL;
3559				table_size = 0;
3560			}
3561		}
3562
3563		/* Update the variables to point into the current struct net
3564		 * except for the first element flush
3565		 */
3566		for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
3567			tbl[i].data += (void *)net - (void *)&init_net;
3568	}
3569	tbl[0].extra1 = net;
3570
3571	net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
3572						     tbl, table_size);
3573	if (!net->ipv4.route_hdr)
3574		goto err_reg;
3575	return 0;
3576
3577err_reg:
3578	if (tbl != ipv4_route_netns_table)
3579		kfree(tbl);
3580err_dup:
3581	return -ENOMEM;
3582}
3583
3584static __net_exit void sysctl_route_net_exit(struct net *net)
3585{
3586	struct ctl_table *tbl;
3587
3588	tbl = net->ipv4.route_hdr->ctl_table_arg;
3589	unregister_net_sysctl_table(net->ipv4.route_hdr);
3590	BUG_ON(tbl == ipv4_route_netns_table);
3591	kfree(tbl);
3592}
3593
3594static __net_initdata struct pernet_operations sysctl_route_ops = {
3595	.init = sysctl_route_net_init,
3596	.exit = sysctl_route_net_exit,
3597};
3598#endif
3599
3600static __net_init int netns_ip_rt_init(struct net *net)
3601{
3602	/* Set default value for namespaceified sysctls */
3603	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
3604	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
3605	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
3606	return 0;
3607}
3608
3609static struct pernet_operations __net_initdata ip_rt_ops = {
3610	.init = netns_ip_rt_init,
3611};
3612
3613static __net_init int rt_genid_init(struct net *net)
3614{
3615	atomic_set(&net->ipv4.rt_genid, 0);
3616	atomic_set(&net->fnhe_genid, 0);
3617	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
 
3618	return 0;
3619}
3620
3621static __net_initdata struct pernet_operations rt_genid_ops = {
3622	.init = rt_genid_init,
3623};
3624
3625static int __net_init ipv4_inetpeer_init(struct net *net)
3626{
3627	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3628
3629	if (!bp)
3630		return -ENOMEM;
3631	inet_peer_base_init(bp);
3632	net->ipv4.peers = bp;
3633	return 0;
3634}
3635
3636static void __net_exit ipv4_inetpeer_exit(struct net *net)
 
3637{
3638	struct inet_peer_base *bp = net->ipv4.peers;
3639
3640	net->ipv4.peers = NULL;
3641	inetpeer_invalidate_tree(bp);
3642	kfree(bp);
3643}
3644
3645static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3646	.init	=	ipv4_inetpeer_init,
3647	.exit	=	ipv4_inetpeer_exit,
3648};
3649
3650#ifdef CONFIG_IP_ROUTE_CLASSID
3651struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3652#endif /* CONFIG_IP_ROUTE_CLASSID */
3653
3654int __init ip_rt_init(void)
3655{
3656	void *idents_hash;
3657	int cpu;
3658
3659	/* For modern hosts, this will use 2 MB of memory */
3660	idents_hash = alloc_large_system_hash("IP idents",
3661					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
3662					      0,
3663					      16, /* one bucket per 64 KB */
3664					      HASH_ZERO,
3665					      NULL,
3666					      &ip_idents_mask,
3667					      2048,
3668					      256*1024);
3669
3670	ip_idents = idents_hash;
3671
3672	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3673
3674	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3675
3676	for_each_possible_cpu(cpu) {
3677		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3678
3679		INIT_LIST_HEAD(&ul->head);
3680		INIT_LIST_HEAD(&ul->quarantine);
3681		spin_lock_init(&ul->lock);
3682	}
3683#ifdef CONFIG_IP_ROUTE_CLASSID
3684	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3685	if (!ip_rt_acct)
3686		panic("IP: failed to allocate ip_rt_acct\n");
3687#endif
3688
3689	ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
3690					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
3691
3692	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3693
3694	if (dst_entries_init(&ipv4_dst_ops) < 0)
3695		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3696
3697	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3698		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3699
3700	ipv4_dst_ops.gc_thresh = ~0;
3701	ip_rt_max_size = INT_MAX;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3702
3703	devinet_init();
3704	ip_fib_init();
3705
 
 
 
 
 
3706	if (ip_rt_proc_init())
3707		pr_err("Unable to create route proc files\n");
3708#ifdef CONFIG_XFRM
3709	xfrm_init();
3710	xfrm4_init();
3711#endif
3712	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3713		      RTNL_FLAG_DOIT_UNLOCKED);
3714
3715#ifdef CONFIG_SYSCTL
3716	register_pernet_subsys(&sysctl_route_ops);
3717#endif
3718	register_pernet_subsys(&ip_rt_ops);
3719	register_pernet_subsys(&rt_genid_ops);
3720	register_pernet_subsys(&ipv4_inetpeer_ops);
3721	return 0;
3722}
3723
3724#ifdef CONFIG_SYSCTL
3725/*
3726 * We really need to sanitize the damn ipv4 init order, then all
3727 * this nonsense will go away.
3728 */
3729void __init ip_static_sysctl_init(void)
3730{
3731	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3732}
3733#endif