   1/*
   2 *	Generic address resolution entity
   3 *
   4 *	Authors:
   5 *	Pedro Roque		<roque@di.fc.ul.pt>
   6 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 *
  13 *	Fixes:
  14 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  15 *	Harald Welte		Add neighbour cache statistics like rtstat
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/socket.h>
  25#include <linux/netdevice.h>
  26#include <linux/proc_fs.h>
  27#ifdef CONFIG_SYSCTL
  28#include <linux/sysctl.h>
  29#endif
  30#include <linux/times.h>
  31#include <net/net_namespace.h>
  32#include <net/neighbour.h>
 
  33#include <net/dst.h>
  34#include <net/sock.h>
  35#include <net/netevent.h>
  36#include <net/netlink.h>
  37#include <linux/rtnetlink.h>
  38#include <linux/random.h>
  39#include <linux/string.h>
  40#include <linux/log2.h>
  41#include <linux/inetdevice.h>
  42#include <net/addrconf.h>
  43
  44#define DEBUG
 
  45#define NEIGH_DEBUG 1
  46#define neigh_dbg(level, fmt, ...)		\
  47do {						\
  48	if (level <= NEIGH_DEBUG)		\
  49		pr_debug(fmt, ##__VA_ARGS__);	\
  50} while (0)
  51
  52#define PNEIGH_HASHMASK		0xF
  53
  54static void neigh_timer_handler(unsigned long arg);
  55static void __neigh_notify(struct neighbour *n, int type, int flags);
  56static void neigh_update_notify(struct neighbour *neigh);
  57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
 
 
  58
  59#ifdef CONFIG_PROC_FS
  60static const struct file_operations neigh_stat_seq_fops;
  61#endif
  62
   63/*
   64   Neighbour hash table buckets are protected with rwlock tbl->lock.
   65
   66   - All scans/updates of the hash buckets MUST be made under this lock.
   67   - NOTHING clever should be done under this lock: no callbacks
   68     into protocol backends, no attempts to send anything to the network.
   69     That would result in deadlocks if the backend/driver wants to use the
   70     neighbour cache.
   71   - If an entry requires some non-trivial actions, increase
   72     its reference count and release the table lock.
   73
   74   Neighbour entries are protected:
   75   - with a reference count.
   76   - with the rwlock neigh->lock
   77
   78   The reference count prevents destruction.
   79
   80   neigh->lock mainly serializes the ll address data and its validity state.
   81   However, the same lock is also used to protect other entry fields:
   82    - timer
   83    - resolution queue
   84
   85   Again, nothing clever shall be done under neigh->lock;
   86   the most complicated procedure we allow is dev->hard_header.
   87   It is assumed that dev->hard_header is simple and does
   88   not make callbacks into neighbour tables.
   89 */
  90
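/*
 * Editor's note: the sketch below is illustrative only and is NOT part of the
 * original file. It spells out the locking rule described above using helpers
 * that already exist in this subsystem (write_lock_bh(), neigh_hold(),
 * neigh_release()); the function name is hypothetical. A walker that needs to
 * do non-trivial work on an entry takes a reference and drops tbl->lock first:
 */
#if 0
static void example_walk_one(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	/* ... locate "n" in the hash buckets; pointer surgery only ... */
	neigh_hold(n);			/* keep the entry alive */
	write_unlock_bh(&tbl->lock);	/* drop the table lock first */

	/* Now it is safe to call into protocol/driver code using "n". */

	neigh_release(n);
}
#endif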
  91static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  92{
  93	kfree_skb(skb);
  94	return -ENETDOWN;
  95}
  96
  97static void neigh_cleanup_and_release(struct neighbour *neigh)
  98{
  99	if (neigh->parms->neigh_cleanup)
 100		neigh->parms->neigh_cleanup(neigh);
 101
 102	__neigh_notify(neigh, RTM_DELNEIGH, 0);
 103	neigh_release(neigh);
 104}
 105
  106/*
  107 * Returns a value uniformly distributed over the interval (1/2)*base ... (3/2)*base.
  108 * This corresponds to the default IPv6 settings and is not overridable,
  109 * because it is a really reasonable choice.
  110 */
 111
 112unsigned long neigh_rand_reach_time(unsigned long base)
 113{
 114	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 115}
 116EXPORT_SYMBOL(neigh_rand_reach_time);
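/*
 * Editor's note (illustrative, not from the original file): for example, with
 * base = 30 * HZ the expression (prandom_u32() % base) + (base >> 1) yields a
 * value in [15 * HZ, 45 * HZ), i.e. uniformly distributed over the
 * (1/2)*base ... (3/2)*base interval described in the comment above.
 */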
 117
 118
 119static int neigh_forced_gc(struct neigh_table *tbl)
 120{
 121	int shrunk = 0;
 122	int i;
 123	struct neigh_hash_table *nht;
 124
 125	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 126
 127	write_lock_bh(&tbl->lock);
 128	nht = rcu_dereference_protected(tbl->nht,
 129					lockdep_is_held(&tbl->lock));
 130	for (i = 0; i < (1 << nht->hash_shift); i++) {
 131		struct neighbour *n;
 132		struct neighbour __rcu **np;
 133
 134		np = &nht->hash_buckets[i];
 135		while ((n = rcu_dereference_protected(*np,
 136					lockdep_is_held(&tbl->lock))) != NULL) {
 137			/* Neighbour record may be discarded if:
 138			 * - nobody refers to it.
 139			 * - it is not permanent
 140			 */
 141			write_lock(&n->lock);
 142			if (atomic_read(&n->refcnt) == 1 &&
 143			    !(n->nud_state & NUD_PERMANENT)) {
 144				rcu_assign_pointer(*np,
 145					rcu_dereference_protected(n->next,
 146						  lockdep_is_held(&tbl->lock)));
 147				n->dead = 1;
 148				shrunk	= 1;
 149				write_unlock(&n->lock);
 150				neigh_cleanup_and_release(n);
 151				continue;
 152			}
 153			write_unlock(&n->lock);
 154			np = &n->next;
 155		}
 156	}
 157
 158	tbl->last_flush = jiffies;
 159
 160	write_unlock_bh(&tbl->lock);
 161
 162	return shrunk;
 163}
 164
 165static void neigh_add_timer(struct neighbour *n, unsigned long when)
 166{
 167	neigh_hold(n);
 168	if (unlikely(mod_timer(&n->timer, when))) {
 169		printk("NEIGH: BUG, double timer add, state is %x\n",
 170		       n->nud_state);
 171		dump_stack();
 172	}
 173}
 174
 175static int neigh_del_timer(struct neighbour *n)
 176{
 177	if ((n->nud_state & NUD_IN_TIMER) &&
 178	    del_timer(&n->timer)) {
 179		neigh_release(n);
 180		return 1;
 181	}
 182	return 0;
 183}
 184
 185static void pneigh_queue_purge(struct sk_buff_head *list)
 186{
 
 
 187	struct sk_buff *skb;
 188
 189	while ((skb = skb_dequeue(list)) != NULL) {
 190		dev_put(skb->dev);
 191		kfree_skb(skb);
 192	}
 193}
 194
 195static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 
 196{
 197	int i;
 198	struct neigh_hash_table *nht;
 199
 200	nht = rcu_dereference_protected(tbl->nht,
 201					lockdep_is_held(&tbl->lock));
 202
 203	for (i = 0; i < (1 << nht->hash_shift); i++) {
 204		struct neighbour *n;
 205		struct neighbour __rcu **np = &nht->hash_buckets[i];
 206
 207		while ((n = rcu_dereference_protected(*np,
 208					lockdep_is_held(&tbl->lock))) != NULL) {
 209			if (dev && n->dev != dev) {
 210				np = &n->next;
 211				continue;
 212			}
 213			rcu_assign_pointer(*np,
 214				   rcu_dereference_protected(n->next,
 215						lockdep_is_held(&tbl->lock)));
 216			write_lock(&n->lock);
 217			neigh_del_timer(n);
 218			n->dead = 1;
 219
 220			if (atomic_read(&n->refcnt) != 1) {
  221			/* The most unpleasant situation:
  222			   we must destroy the neighbour entry,
  223			   but someone still uses it.
  224
  225			   Its destruction will be delayed until
  226			   the last user releases it, but
  227			   we must kill timers etc. and move
  228			   it to a safe state.
  229			 */
 230				__skb_queue_purge(&n->arp_queue);
 231				n->arp_queue_len_bytes = 0;
 232				n->output = neigh_blackhole;
 233				if (n->nud_state & NUD_VALID)
 234					n->nud_state = NUD_NOARP;
 235				else
 236					n->nud_state = NUD_NONE;
 237				neigh_dbg(2, "neigh %p is stray\n", n);
 238			}
 239			write_unlock(&n->lock);
 240			neigh_cleanup_and_release(n);
 
 
 241		}
 
 
 242	}
 243}
 244
 245void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 246{
 247	write_lock_bh(&tbl->lock);
 248	neigh_flush_dev(tbl, dev);
 249	write_unlock_bh(&tbl->lock);
 250}
 251EXPORT_SYMBOL(neigh_changeaddr);
 252
 253int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 
 254{
 255	write_lock_bh(&tbl->lock);
 256	neigh_flush_dev(tbl, dev);
 257	pneigh_ifdown(tbl, dev);
 258	write_unlock_bh(&tbl->lock);
 259
 260	del_timer_sync(&tbl->proxy_timer);
 261	pneigh_queue_purge(&tbl->proxy_queue);
 262	return 0;
 263}
 264EXPORT_SYMBOL(neigh_ifdown);
 265
 266static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 
 
 267{
 268	struct neighbour *n = NULL;
 269	unsigned long now = jiffies;
 270	int entries;
 271
 272	entries = atomic_inc_return(&tbl->entries) - 1;
 273	if (entries >= tbl->gc_thresh3 ||
 274	    (entries >= tbl->gc_thresh2 &&
 275	     time_after(now, tbl->last_flush + 5 * HZ))) {
 276		if (!neigh_forced_gc(tbl) &&
 277		    entries >= tbl->gc_thresh3) {
 278			net_info_ratelimited("%s: neighbor table overflow!\n",
 279					     tbl->id);
 280			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 281			goto out_entries;
 282		}
 283	}
 284
 
 285	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 286	if (!n)
 287		goto out_entries;
 288
 289	__skb_queue_head_init(&n->arp_queue);
 290	rwlock_init(&n->lock);
 291	seqlock_init(&n->ha_lock);
 292	n->updated	  = n->used = now;
 293	n->nud_state	  = NUD_NONE;
 294	n->output	  = neigh_blackhole;
 
 295	seqlock_init(&n->hh.hh_lock);
 296	n->parms	  = neigh_parms_clone(&tbl->parms);
 297	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
 298
 299	NEIGH_CACHE_STAT_INC(tbl, allocs);
 300	n->tbl		  = tbl;
 301	atomic_set(&n->refcnt, 1);
 302	n->dead		  = 1;
 303out:
 304	return n;
 305
 306out_entries:
 307	atomic_dec(&tbl->entries);
 
 308	goto out;
 309}
 310
 311static void neigh_get_hash_rnd(u32 *x)
 312{
 313	get_random_bytes(x, sizeof(*x));
 314	*x |= 1;
 315}
 316
 317static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 318{
 319	size_t size = (1 << shift) * sizeof(struct neighbour *);
 
 320	struct neigh_hash_table *ret;
 321	struct neighbour __rcu **buckets;
 322	int i;
 323
 324	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 325	if (!ret)
 326		return NULL;
 327	if (size <= PAGE_SIZE)
 328		buckets = kzalloc(size, GFP_ATOMIC);
 329	else
 330		buckets = (struct neighbour __rcu **)
 331			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 332					   get_order(size));
 333	if (!buckets) {
 334		kfree(ret);
 335		return NULL;
 336	}
 337	ret->hash_buckets = buckets;
 338	ret->hash_shift = shift;
 339	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 340		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 341	return ret;
 342}
 343
 344static void neigh_hash_free_rcu(struct rcu_head *head)
 345{
 346	struct neigh_hash_table *nht = container_of(head,
 347						    struct neigh_hash_table,
 348						    rcu);
 349	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 350	struct neighbour __rcu **buckets = nht->hash_buckets;
 351
 352	if (size <= PAGE_SIZE)
 353		kfree(buckets);
 354	else
 355		free_pages((unsigned long)buckets, get_order(size));
 356	kfree(nht);
 357}
 358
 359static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 360						unsigned long new_shift)
 361{
 362	unsigned int i, hash;
 363	struct neigh_hash_table *new_nht, *old_nht;
 364
 365	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 366
 367	old_nht = rcu_dereference_protected(tbl->nht,
 368					    lockdep_is_held(&tbl->lock));
 369	new_nht = neigh_hash_alloc(new_shift);
 370	if (!new_nht)
 371		return old_nht;
 372
 373	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 374		struct neighbour *n, *next;
 
 375
 376		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 377						   lockdep_is_held(&tbl->lock));
 378		     n != NULL;
 379		     n = next) {
 380			hash = tbl->hash(n->primary_key, n->dev,
 381					 new_nht->hash_rnd);
 382
 383			hash >>= (32 - new_nht->hash_shift);
 384			next = rcu_dereference_protected(n->next,
 385						lockdep_is_held(&tbl->lock));
 386
 387			rcu_assign_pointer(n->next,
 388					   rcu_dereference_protected(
 389						new_nht->hash_buckets[hash],
 390						lockdep_is_held(&tbl->lock)));
 391			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 392		}
 393	}
 394
 395	rcu_assign_pointer(tbl->nht, new_nht);
 396	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 397	return new_nht;
 398}
 399
 400struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 401			       struct net_device *dev)
 402{
 403	struct neighbour *n;
 404
 405	NEIGH_CACHE_STAT_INC(tbl, lookups);
 406
 407	rcu_read_lock_bh();
 408	n = __neigh_lookup_noref(tbl, pkey, dev);
 409	if (n) {
 410		if (!atomic_inc_not_zero(&n->refcnt))
 411			n = NULL;
 412		NEIGH_CACHE_STAT_INC(tbl, hits);
 413	}
 414
 415	rcu_read_unlock_bh();
 416	return n;
 417}
 418EXPORT_SYMBOL(neigh_lookup);
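/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * A successful neigh_lookup() returns the entry with its reference count
 * raised, so a hypothetical caller pairs it with neigh_release() when done:
 */
#if 0
	struct neighbour *n = neigh_lookup(tbl, &addr, dev);

	if (n) {
		/* ... inspect or use the entry ... */
		neigh_release(n);
	}
#endif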
 419
 420struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 421				     const void *pkey)
 
 
 422{
 423	struct neighbour *n;
 424	int key_len = tbl->key_len;
 425	u32 hash_val;
 426	struct neigh_hash_table *nht;
 427
 428	NEIGH_CACHE_STAT_INC(tbl, lookups);
 429
 430	rcu_read_lock_bh();
 431	nht = rcu_dereference_bh(tbl->nht);
 432	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 433
 434	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 435	     n != NULL;
 436	     n = rcu_dereference_bh(n->next)) {
 437		if (!memcmp(n->primary_key, pkey, key_len) &&
 438		    net_eq(dev_net(n->dev), net)) {
 439			if (!atomic_inc_not_zero(&n->refcnt))
 440				n = NULL;
 441			NEIGH_CACHE_STAT_INC(tbl, hits);
 442			break;
 443		}
 444	}
 445
 446	rcu_read_unlock_bh();
 447	return n;
 448}
 449EXPORT_SYMBOL(neigh_lookup_nodev);
 450
 451struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 452				 struct net_device *dev, bool want_ref)
 453{
 454	u32 hash_val;
 455	int key_len = tbl->key_len;
 456	int error;
 457	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
 458	struct neigh_hash_table *nht;
 459
 
 
 460	if (!n) {
 461		rc = ERR_PTR(-ENOBUFS);
 462		goto out;
 463	}
 464
 465	memcpy(n->primary_key, pkey, key_len);
 466	n->dev = dev;
 467	dev_hold(dev);
 468
 469	/* Protocol specific setup. */
 470	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 471		rc = ERR_PTR(error);
 472		goto out_neigh_release;
 473	}
 474
 475	if (dev->netdev_ops->ndo_neigh_construct) {
 476		error = dev->netdev_ops->ndo_neigh_construct(n);
 477		if (error < 0) {
 478			rc = ERR_PTR(error);
 479			goto out_neigh_release;
 480		}
 481	}
 482
 483	/* Device specific setup. */
 484	if (n->parms->neigh_setup &&
 485	    (error = n->parms->neigh_setup(n)) < 0) {
 486		rc = ERR_PTR(error);
 487		goto out_neigh_release;
 488	}
 489
 490	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 491
 492	write_lock_bh(&tbl->lock);
 493	nht = rcu_dereference_protected(tbl->nht,
 494					lockdep_is_held(&tbl->lock));
 495
 496	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 497		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 498
 499	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 500
 501	if (n->parms->dead) {
 502		rc = ERR_PTR(-EINVAL);
 503		goto out_tbl_unlock;
 504	}
 505
 506	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 507					    lockdep_is_held(&tbl->lock));
 508	     n1 != NULL;
 509	     n1 = rcu_dereference_protected(n1->next,
 510			lockdep_is_held(&tbl->lock))) {
 511		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 512			if (want_ref)
 513				neigh_hold(n1);
 514			rc = n1;
 515			goto out_tbl_unlock;
 516		}
 517	}
 518
 519	n->dead = 0;
 520	if (want_ref)
 521		neigh_hold(n);
 522	rcu_assign_pointer(n->next,
 523			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 524						     lockdep_is_held(&tbl->lock)));
 525	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 
 526	write_unlock_bh(&tbl->lock);
 527	neigh_dbg(2, "neigh %p is created\n", n);
 528	rc = n;
 529out:
 530	return rc;
 531out_tbl_unlock:
 532	write_unlock_bh(&tbl->lock);
 533out_neigh_release:
 
 
 534	neigh_release(n);
 535	goto out;
 536}
 537EXPORT_SYMBOL(__neigh_create);
 538
 539static u32 pneigh_hash(const void *pkey, int key_len)
 540{
 541	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 542	hash_val ^= (hash_val >> 16);
 543	hash_val ^= hash_val >> 8;
 544	hash_val ^= hash_val >> 4;
 545	hash_val &= PNEIGH_HASHMASK;
 546	return hash_val;
 547}
 548
 549static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 550					      struct net *net,
 551					      const void *pkey,
 552					      int key_len,
 553					      struct net_device *dev)
 554{
 555	while (n) {
 556		if (!memcmp(n->key, pkey, key_len) &&
 557		    net_eq(pneigh_net(n), net) &&
 558		    (n->dev == dev || !n->dev))
 559			return n;
 560		n = n->next;
 561	}
 562	return NULL;
 563}
 564
 565struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 566		struct net *net, const void *pkey, struct net_device *dev)
 567{
 568	int key_len = tbl->key_len;
 569	u32 hash_val = pneigh_hash(pkey, key_len);
 570
 571	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 572				 net, pkey, key_len, dev);
 573}
 574EXPORT_SYMBOL_GPL(__pneigh_lookup);
 575
 576struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 577				    struct net *net, const void *pkey,
 578				    struct net_device *dev, int creat)
 579{
 580	struct pneigh_entry *n;
 581	int key_len = tbl->key_len;
 582	u32 hash_val = pneigh_hash(pkey, key_len);
 583
 584	read_lock_bh(&tbl->lock);
 585	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 586			      net, pkey, key_len, dev);
 587	read_unlock_bh(&tbl->lock);
 588
 589	if (n || !creat)
 590		goto out;
 591
 592	ASSERT_RTNL();
 593
 594	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 595	if (!n)
 596		goto out;
 597
 598	write_pnet(&n->net, net);
 599	memcpy(n->key, pkey, key_len);
 600	n->dev = dev;
 601	if (dev)
 602		dev_hold(dev);
 603
 604	if (tbl->pconstructor && tbl->pconstructor(n)) {
 605		if (dev)
 606			dev_put(dev);
 607		kfree(n);
 608		n = NULL;
 609		goto out;
 610	}
 611
 612	write_lock_bh(&tbl->lock);
 613	n->next = tbl->phash_buckets[hash_val];
 614	tbl->phash_buckets[hash_val] = n;
 615	write_unlock_bh(&tbl->lock);
 616out:
 617	return n;
 618}
 619EXPORT_SYMBOL(pneigh_lookup);
 620
 621
 622int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 623		  struct net_device *dev)
 624{
 625	struct pneigh_entry *n, **np;
 626	int key_len = tbl->key_len;
 627	u32 hash_val = pneigh_hash(pkey, key_len);
 628
 629	write_lock_bh(&tbl->lock);
 630	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 631	     np = &n->next) {
 632		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 633		    net_eq(pneigh_net(n), net)) {
 634			*np = n->next;
 635			write_unlock_bh(&tbl->lock);
 636			if (tbl->pdestructor)
 637				tbl->pdestructor(n);
 638			if (n->dev)
 639				dev_put(n->dev);
 640			kfree(n);
 641			return 0;
 642		}
 643	}
 644	write_unlock_bh(&tbl->lock);
 645	return -ENOENT;
 646}
 647
 648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 
 649{
 650	struct pneigh_entry *n, **np;
 651	u32 h;
 652
 653	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 654		np = &tbl->phash_buckets[h];
 655		while ((n = *np) != NULL) {
 656			if (!dev || n->dev == dev) {
 657				*np = n->next;
 658				if (tbl->pdestructor)
 659					tbl->pdestructor(n);
 660				if (n->dev)
 661					dev_put(n->dev);
 662				kfree(n);
 663				continue;
 664			}
 665			np = &n->next;
 666		}
 667	}
 668	return -ENOENT;
 669}
 670
 671static void neigh_parms_destroy(struct neigh_parms *parms);
 672
 673static inline void neigh_parms_put(struct neigh_parms *parms)
 674{
 675	if (atomic_dec_and_test(&parms->refcnt))
 676		neigh_parms_destroy(parms);
 677}
 678
  679/*
  680 *	The neighbour must already be out of the table.
  681 *
  682 */
 683void neigh_destroy(struct neighbour *neigh)
 684{
 685	struct net_device *dev = neigh->dev;
 686
 687	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 688
 689	if (!neigh->dead) {
 690		pr_warn("Destroying alive neighbour %p\n", neigh);
 691		dump_stack();
 692		return;
 693	}
 694
 695	if (neigh_del_timer(neigh))
 696		pr_warn("Impossible event\n");
 697
 698	write_lock_bh(&neigh->lock);
 699	__skb_queue_purge(&neigh->arp_queue);
 700	write_unlock_bh(&neigh->lock);
 701	neigh->arp_queue_len_bytes = 0;
 702
 703	if (dev->netdev_ops->ndo_neigh_destroy)
 704		dev->netdev_ops->ndo_neigh_destroy(neigh);
 705
 706	dev_put(dev);
 707	neigh_parms_put(neigh->parms);
 708
 709	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 710
 711	atomic_dec(&neigh->tbl->entries);
 712	kfree_rcu(neigh, rcu);
 713}
 714EXPORT_SYMBOL(neigh_destroy);
 715
 716/* Neighbour state is suspicious;
 717   disable fast path.
 718
 719   Called with write_locked neigh.
 720 */
 721static void neigh_suspect(struct neighbour *neigh)
 722{
 723	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 724
 725	neigh->output = neigh->ops->output;
 726}
 727
 728/* Neighbour state is OK;
 729   enable fast path.
 730
 731   Called with write_locked neigh.
 732 */
 733static void neigh_connect(struct neighbour *neigh)
 734{
 735	neigh_dbg(2, "neigh %p is connected\n", neigh);
 736
 737	neigh->output = neigh->ops->connected_output;
 738}
 739
 740static void neigh_periodic_work(struct work_struct *work)
 741{
 742	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 
 
 743	struct neighbour *n;
 744	struct neighbour __rcu **np;
 745	unsigned int i;
 746	struct neigh_hash_table *nht;
 747
 748	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 749
 750	write_lock_bh(&tbl->lock);
 751	nht = rcu_dereference_protected(tbl->nht,
 752					lockdep_is_held(&tbl->lock));
 753
 754	/*
 755	 *	periodically recompute ReachableTime from random function
 756	 */
 757
 758	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 759		struct neigh_parms *p;
 760		tbl->last_rand = jiffies;
 
 761		list_for_each_entry(p, &tbl->parms_list, list)
 762			p->reachable_time =
 763				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 764	}
 765
 766	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 767		goto out;
 768
 769	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 770		np = &nht->hash_buckets[i];
 771
 772		while ((n = rcu_dereference_protected(*np,
 773				lockdep_is_held(&tbl->lock))) != NULL) {
 774			unsigned int state;
 775
 776			write_lock(&n->lock);
 777
 778			state = n->nud_state;
 779			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
 
 780				write_unlock(&n->lock);
 781				goto next_elt;
 782			}
 783
 784			if (time_before(n->used, n->confirmed))
 
 785				n->used = n->confirmed;
 786
 787			if (atomic_read(&n->refcnt) == 1 &&
 788			    (state == NUD_FAILED ||
 789			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 790				*np = n->next;
 791				n->dead = 1;
 
 
 792				write_unlock(&n->lock);
 793				neigh_cleanup_and_release(n);
 794				continue;
 795			}
 796			write_unlock(&n->lock);
 797
 798next_elt:
 799			np = &n->next;
 800		}
 801		/*
 802		 * It's fine to release lock here, even if hash table
 803		 * grows while we are preempted.
 804		 */
 805		write_unlock_bh(&tbl->lock);
 806		cond_resched();
 807		write_lock_bh(&tbl->lock);
 808		nht = rcu_dereference_protected(tbl->nht,
 809						lockdep_is_held(&tbl->lock));
 810	}
 811out:
 812	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 813	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 814	 * BASE_REACHABLE_TIME.
 815	 */
 816	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 817			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 818	write_unlock_bh(&tbl->lock);
 819}
 820
 821static __inline__ int neigh_max_probes(struct neighbour *n)
 822{
 823	struct neigh_parms *p = n->parms;
 824	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 825	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 826	        NEIGH_VAR(p, MCAST_PROBES));
 827}
 828
 829static void neigh_invalidate(struct neighbour *neigh)
 830	__releases(neigh->lock)
 831	__acquires(neigh->lock)
 832{
 833	struct sk_buff *skb;
 834
 835	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 836	neigh_dbg(2, "neigh %p is failed\n", neigh);
 837	neigh->updated = jiffies;
 838
  839	/* This is a very delicate place. report_unreachable is a very complicated
  840	   routine. In particular, it can hit the same neighbour entry!
  841
  842	   So we try to be careful and avoid an endless loop. --ANK
  843	 */
 844	while (neigh->nud_state == NUD_FAILED &&
 845	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 846		write_unlock(&neigh->lock);
 847		neigh->ops->error_report(neigh, skb);
 848		write_lock(&neigh->lock);
 849	}
 850	__skb_queue_purge(&neigh->arp_queue);
 851	neigh->arp_queue_len_bytes = 0;
 852}
 853
 854static void neigh_probe(struct neighbour *neigh)
 855	__releases(neigh->lock)
 856{
 857	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 858	/* keep skb alive even if arp_queue overflows */
 859	if (skb)
 860		skb = skb_clone(skb, GFP_ATOMIC);
 861	write_unlock(&neigh->lock);
 862	neigh->ops->solicit(neigh, skb);
 
 863	atomic_inc(&neigh->probes);
 864	kfree_skb(skb);
 865}
 866
 867/* Called when a timer expires for a neighbour entry. */
 868
 869static void neigh_timer_handler(unsigned long arg)
 870{
 871	unsigned long now, next;
 872	struct neighbour *neigh = (struct neighbour *)arg;
 873	unsigned int state;
 874	int notify = 0;
 875
 876	write_lock(&neigh->lock);
 877
 878	state = neigh->nud_state;
 879	now = jiffies;
 880	next = now + HZ;
 881
 882	if (!(state & NUD_IN_TIMER))
 883		goto out;
 884
 885	if (state & NUD_REACHABLE) {
 886		if (time_before_eq(now,
 887				   neigh->confirmed + neigh->parms->reachable_time)) {
 888			neigh_dbg(2, "neigh %p is still alive\n", neigh);
 889			next = neigh->confirmed + neigh->parms->reachable_time;
 890		} else if (time_before_eq(now,
 891					  neigh->used +
 892					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 893			neigh_dbg(2, "neigh %p is delayed\n", neigh);
 894			neigh->nud_state = NUD_DELAY;
 895			neigh->updated = jiffies;
 896			neigh_suspect(neigh);
 897			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
 898		} else {
 899			neigh_dbg(2, "neigh %p is suspected\n", neigh);
 900			neigh->nud_state = NUD_STALE;
 901			neigh->updated = jiffies;
 902			neigh_suspect(neigh);
 903			notify = 1;
 904		}
 905	} else if (state & NUD_DELAY) {
 906		if (time_before_eq(now,
 907				   neigh->confirmed +
 908				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 909			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
 910			neigh->nud_state = NUD_REACHABLE;
 911			neigh->updated = jiffies;
 912			neigh_connect(neigh);
 913			notify = 1;
 914			next = neigh->confirmed + neigh->parms->reachable_time;
 915		} else {
 916			neigh_dbg(2, "neigh %p is probed\n", neigh);
 917			neigh->nud_state = NUD_PROBE;
 918			neigh->updated = jiffies;
 919			atomic_set(&neigh->probes, 0);
 920			notify = 1;
 921			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 
 922		}
 923	} else {
 924		/* NUD_PROBE|NUD_INCOMPLETE */
 925		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 926	}
 927
 928	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 929	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 930		neigh->nud_state = NUD_FAILED;
 931		notify = 1;
 932		neigh_invalidate(neigh);
 933		goto out;
 934	}
 935
 936	if (neigh->nud_state & NUD_IN_TIMER) {
 937		if (time_before(next, jiffies + HZ/2))
 938			next = jiffies + HZ/2;
 939		if (!mod_timer(&neigh->timer, next))
 940			neigh_hold(neigh);
 941	}
 942	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 943		neigh_probe(neigh);
 944	} else {
 945out:
 946		write_unlock(&neigh->lock);
 947	}
 948
 949	if (notify)
 950		neigh_update_notify(neigh);
 
 
 951
 952	neigh_release(neigh);
 953}
 954
 955int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 
 956{
 957	int rc;
 958	bool immediate_probe = false;
 959
 960	write_lock_bh(&neigh->lock);
 961
 962	rc = 0;
 963	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 964		goto out_unlock_bh;
 965	if (neigh->dead)
 966		goto out_dead;
 967
 968	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 969		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
 970		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
 971			unsigned long next, now = jiffies;
 972
 973			atomic_set(&neigh->probes,
 974				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
 975			neigh->nud_state     = NUD_INCOMPLETE;
 
 976			neigh->updated = now;
 977			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
 978					 HZ/2);
 979			neigh_add_timer(neigh, next);
 980			immediate_probe = true;
 981		} else {
 982			neigh->nud_state = NUD_FAILED;
 983			neigh->updated = jiffies;
 984			write_unlock_bh(&neigh->lock);
 985
 986			kfree_skb(skb);
 987			return 1;
 988		}
 989	} else if (neigh->nud_state & NUD_STALE) {
 990		neigh_dbg(2, "neigh %p is delayed\n", neigh);
 991		neigh->nud_state = NUD_DELAY;
 
 992		neigh->updated = jiffies;
 993		neigh_add_timer(neigh, jiffies +
 994				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
 995	}
 996
 997	if (neigh->nud_state == NUD_INCOMPLETE) {
 998		if (skb) {
 999			while (neigh->arp_queue_len_bytes + skb->truesize >
1000			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1001				struct sk_buff *buff;
1002
1003				buff = __skb_dequeue(&neigh->arp_queue);
1004				if (!buff)
1005					break;
1006				neigh->arp_queue_len_bytes -= buff->truesize;
1007				kfree_skb(buff);
1008				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1009			}
1010			skb_dst_force(skb);
1011			__skb_queue_tail(&neigh->arp_queue, skb);
1012			neigh->arp_queue_len_bytes += skb->truesize;
1013		}
1014		rc = 1;
1015	}
1016out_unlock_bh:
1017	if (immediate_probe)
1018		neigh_probe(neigh);
1019	else
1020		write_unlock(&neigh->lock);
1021	local_bh_enable();
 
1022	return rc;
1023
1024out_dead:
1025	if (neigh->nud_state & NUD_STALE)
1026		goto out_unlock_bh;
1027	write_unlock_bh(&neigh->lock);
1028	kfree_skb(skb);
 
1029	return 1;
1030}
1031EXPORT_SYMBOL(__neigh_event_send);
1032
1033static void neigh_update_hhs(struct neighbour *neigh)
1034{
1035	struct hh_cache *hh;
1036	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1037		= NULL;
1038
1039	if (neigh->dev->header_ops)
1040		update = neigh->dev->header_ops->cache_update;
1041
1042	if (update) {
1043		hh = &neigh->hh;
1044		if (hh->hh_len) {
1045			write_seqlock_bh(&hh->hh_lock);
1046			update(hh, neigh->dev, neigh->ha);
1047			write_sequnlock_bh(&hh->hh_lock);
1048		}
1049	}
1050}
1051
1052
1053
 1054/* Generic update routine.
 1055   -- lladdr is the new lladdr, or NULL if it is not supplied.
 1056   -- new    is the new state.
 1057   -- flags
 1058	NEIGH_UPDATE_F_OVERRIDE allows the existing lladdr to be overridden
 1059				if it is different.
 1060	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect the existing "connected"
 1061				lladdr instead of overriding it
 1062				if it is different.
 1063				It also allows the current state to be retained
 1064				if lladdr is unchanged.
 1065	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1066
 1067	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows the existing
 1068				NTF_ROUTER flag to be overridden.
 1069	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known to be
 1070				a router.
 1071
 1072   The caller MUST hold a reference on the entry.
 1073 */
1074
1075int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1076		 u32 flags)
1077{
1078	u8 old;
1079	int err;
1080	int notify = 0;
1081	struct net_device *dev;
1082	int update_isrouter = 0;
1083
1084	write_lock_bh(&neigh->lock);
1085
1086	dev    = neigh->dev;
1087	old    = neigh->nud_state;
1088	err    = -EPERM;
1089
1090	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1091	    (old & (NUD_NOARP | NUD_PERMANENT)))
1092		goto out;
1093	if (neigh->dead)
1094		goto out;
 
1095
1096	if (!(new & NUD_VALID)) {
1097		neigh_del_timer(neigh);
1098		if (old & NUD_CONNECTED)
1099			neigh_suspect(neigh);
1100		neigh->nud_state = new;
1101		err = 0;
1102		notify = old & NUD_VALID;
1103		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1104		    (new & NUD_FAILED)) {
1105			neigh_invalidate(neigh);
1106			notify = 1;
1107		}
1108		goto out;
1109	}
1110
1111	/* Compare new lladdr with cached one */
1112	if (!dev->addr_len) {
1113		/* First case: device needs no address. */
1114		lladdr = neigh->ha;
1115	} else if (lladdr) {
1116		/* The second case: if something is already cached
1117		   and a new address is proposed:
1118		   - compare new & old
1119		   - if they are different, check override flag
1120		 */
1121		if ((old & NUD_VALID) &&
1122		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1123			lladdr = neigh->ha;
1124	} else {
1125		/* No address is supplied; if we know something,
1126		   use it, otherwise discard the request.
1127		 */
1128		err = -EINVAL;
1129		if (!(old & NUD_VALID))
 
1130			goto out;
 
1131		lladdr = neigh->ha;
1132	}
1133
1134	if (new & NUD_CONNECTED)
1135		neigh->confirmed = jiffies;
1136	neigh->updated = jiffies;
1137
 1138	/* If the entry was valid and the address has not changed,
 1139	   do not change the entry state if the new one is STALE.
 1140	 */
1141	err = 0;
1142	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1143	if (old & NUD_VALID) {
1144		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1145			update_isrouter = 0;
1146			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1147			    (old & NUD_CONNECTED)) {
1148				lladdr = neigh->ha;
1149				new = NUD_STALE;
1150			} else
1151				goto out;
1152		} else {
1153			if (lladdr == neigh->ha && new == NUD_STALE &&
1154			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1155			     (old & NUD_CONNECTED))
1156			    )
1157				new = old;
1158		}
1159	}
1160
1161	if (new != old) {
1162		neigh_del_timer(neigh);
1163		if (new & NUD_PROBE)
1164			atomic_set(&neigh->probes, 0);
1165		if (new & NUD_IN_TIMER)
1166			neigh_add_timer(neigh, (jiffies +
1167						((new & NUD_REACHABLE) ?
1168						 neigh->parms->reachable_time :
1169						 0)));
1170		neigh->nud_state = new;
1171		notify = 1;
1172	}
1173
1174	if (lladdr != neigh->ha) {
1175		write_seqlock(&neigh->ha_lock);
1176		memcpy(&neigh->ha, lladdr, dev->addr_len);
1177		write_sequnlock(&neigh->ha_lock);
1178		neigh_update_hhs(neigh);
1179		if (!(new & NUD_CONNECTED))
1180			neigh->confirmed = jiffies -
1181				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1182		notify = 1;
1183	}
1184	if (new == old)
1185		goto out;
1186	if (new & NUD_CONNECTED)
1187		neigh_connect(neigh);
1188	else
1189		neigh_suspect(neigh);
1190	if (!(old & NUD_VALID)) {
1191		struct sk_buff *skb;
1192
1193		/* Again: avoid dead loop if something went wrong */
1194
1195		while (neigh->nud_state & NUD_VALID &&
1196		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197			struct dst_entry *dst = skb_dst(skb);
1198			struct neighbour *n2, *n1 = neigh;
1199			write_unlock_bh(&neigh->lock);
1200
1201			rcu_read_lock();
1202
1203			/* Why not just use 'neigh' as-is?  The problem is that
1204			 * things such as shaper, eql, and sch_teql can end up
1205			 * using alternative, different, neigh objects to output
1206			 * the packet in the output path.  So what we need to do
1207			 * here is re-lookup the top-level neigh in the path so
1208			 * we can reinject the packet there.
1209			 */
1210			n2 = NULL;
1211			if (dst) {
1212				n2 = dst_neigh_lookup_skb(dst, skb);
1213				if (n2)
1214					n1 = n2;
1215			}
1216			n1->output(n1, skb);
1217			if (n2)
1218				neigh_release(n2);
1219			rcu_read_unlock();
1220
1221			write_lock_bh(&neigh->lock);
1222		}
1223		__skb_queue_purge(&neigh->arp_queue);
1224		neigh->arp_queue_len_bytes = 0;
1225	}
1226out:
1227	if (update_isrouter) {
1228		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1229			(neigh->flags | NTF_ROUTER) :
1230			(neigh->flags & ~NTF_ROUTER);
1231	}
1232	write_unlock_bh(&neigh->lock);
1233
1234	if (notify)
1235		neigh_update_notify(neigh);
1236
1237	return err;
1238}
1239EXPORT_SYMBOL(neigh_update);
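/*
 * Editor's note: illustrative sketch, not part of the original file. It shows
 * how the flags documented above combine in practice. A receive path that has
 * just learned a (possibly changed) link-layer address might call, roughly:
 */
#if 0
	neigh_update(n, lladdr, NUD_REACHABLE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE | NEIGH_UPDATE_F_OVERRIDE);
#endif
/*
 * Administrative changes arriving over netlink (see neigh_add() and
 * neigh_delete() later in this file) additionally pass NEIGH_UPDATE_F_ADMIN,
 * which is what permits them to touch NUD_NOARP/NUD_PERMANENT entries.
 */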
1240
1241/* Update the neigh to listen temporarily for probe responses, even if it is
1242 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1243 */
1244void __neigh_set_probe_once(struct neighbour *neigh)
1245{
1246	if (neigh->dead)
1247		return;
1248	neigh->updated = jiffies;
1249	if (!(neigh->nud_state & NUD_FAILED))
1250		return;
1251	neigh->nud_state = NUD_INCOMPLETE;
1252	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253	neigh_add_timer(neigh,
1254			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 
1255}
1256EXPORT_SYMBOL(__neigh_set_probe_once);
1257
1258struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1259				 u8 *lladdr, void *saddr,
1260				 struct net_device *dev)
1261{
1262	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1263						 lladdr || !dev->addr_len);
1264	if (neigh)
1265		neigh_update(neigh, lladdr, NUD_STALE,
1266			     NEIGH_UPDATE_F_OVERRIDE);
1267	return neigh;
1268}
1269EXPORT_SYMBOL(neigh_event_ns);
1270
1271/* called with read_lock_bh(&n->lock); */
1272static void neigh_hh_init(struct neighbour *n)
1273{
1274	struct net_device *dev = n->dev;
1275	__be16 prot = n->tbl->protocol;
1276	struct hh_cache	*hh = &n->hh;
1277
1278	write_lock_bh(&n->lock);
1279
1280	/* Only one thread can come in here and initialize the
1281	 * hh_cache entry.
1282	 */
1283	if (!hh->hh_len)
1284		dev->header_ops->cache(n, hh, prot);
1285
1286	write_unlock_bh(&n->lock);
1287}
1288
1289/* Slow and careful. */
1290
1291int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1292{
1293	int rc = 0;
1294
1295	if (!neigh_event_send(neigh, skb)) {
1296		int err;
1297		struct net_device *dev = neigh->dev;
1298		unsigned int seq;
1299
1300		if (dev->header_ops->cache && !neigh->hh.hh_len)
1301			neigh_hh_init(neigh);
1302
1303		do {
1304			__skb_pull(skb, skb_network_offset(skb));
1305			seq = read_seqbegin(&neigh->ha_lock);
1306			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1307					      neigh->ha, NULL, skb->len);
1308		} while (read_seqretry(&neigh->ha_lock, seq));
1309
1310		if (err >= 0)
1311			rc = dev_queue_xmit(skb);
1312		else
1313			goto out_kfree_skb;
1314	}
1315out:
1316	return rc;
1317out_kfree_skb:
1318	rc = -EINVAL;
1319	kfree_skb(skb);
1320	goto out;
1321}
1322EXPORT_SYMBOL(neigh_resolve_output);
1323
1324/* As fast as possible without hh cache */
1325
1326int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1327{
1328	struct net_device *dev = neigh->dev;
1329	unsigned int seq;
1330	int err;
1331
1332	do {
1333		__skb_pull(skb, skb_network_offset(skb));
1334		seq = read_seqbegin(&neigh->ha_lock);
1335		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1336				      neigh->ha, NULL, skb->len);
1337	} while (read_seqretry(&neigh->ha_lock, seq));
1338
1339	if (err >= 0)
1340		err = dev_queue_xmit(skb);
1341	else {
1342		err = -EINVAL;
1343		kfree_skb(skb);
1344	}
1345	return err;
1346}
1347EXPORT_SYMBOL(neigh_connected_output);
1348
1349int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1350{
1351	return dev_queue_xmit(skb);
1352}
1353EXPORT_SYMBOL(neigh_direct_output);
1354
1355static void neigh_proxy_process(unsigned long arg)
1356{
1357	struct neigh_table *tbl = (struct neigh_table *)arg;
1358	long sched_next = 0;
1359	unsigned long now = jiffies;
1360	struct sk_buff *skb, *n;
1361
1362	spin_lock(&tbl->proxy_queue.lock);
1363
1364	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1365		long tdif = NEIGH_CB(skb)->sched_next - now;
1366
1367		if (tdif <= 0) {
1368			struct net_device *dev = skb->dev;
1369
 
1370			__skb_unlink(skb, &tbl->proxy_queue);
 
1371			if (tbl->proxy_redo && netif_running(dev)) {
1372				rcu_read_lock();
1373				tbl->proxy_redo(skb);
1374				rcu_read_unlock();
1375			} else {
1376				kfree_skb(skb);
1377			}
1378
1379			dev_put(dev);
1380		} else if (!sched_next || tdif < sched_next)
1381			sched_next = tdif;
1382	}
1383	del_timer(&tbl->proxy_timer);
1384	if (sched_next)
1385		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1386	spin_unlock(&tbl->proxy_queue.lock);
1387}
1388
1389void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1390		    struct sk_buff *skb)
1391{
1392	unsigned long now = jiffies;
1393
1394	unsigned long sched_next = now + (prandom_u32() %
1395					  NEIGH_VAR(p, PROXY_DELAY));
1396
1397	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1398		kfree_skb(skb);
1399		return;
1400	}
1401
1402	NEIGH_CB(skb)->sched_next = sched_next;
1403	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1404
1405	spin_lock(&tbl->proxy_queue.lock);
1406	if (del_timer(&tbl->proxy_timer)) {
1407		if (time_before(tbl->proxy_timer.expires, sched_next))
1408			sched_next = tbl->proxy_timer.expires;
1409	}
1410	skb_dst_drop(skb);
1411	dev_hold(skb->dev);
1412	__skb_queue_tail(&tbl->proxy_queue, skb);
 
1413	mod_timer(&tbl->proxy_timer, sched_next);
1414	spin_unlock(&tbl->proxy_queue.lock);
1415}
1416EXPORT_SYMBOL(pneigh_enqueue);
1417
1418static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1419						      struct net *net, int ifindex)
1420{
1421	struct neigh_parms *p;
1422
1423	list_for_each_entry(p, &tbl->parms_list, list) {
1424		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1425		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1426			return p;
1427	}
1428
1429	return NULL;
1430}
1431
1432struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1433				      struct neigh_table *tbl)
1434{
1435	struct neigh_parms *p;
1436	struct net *net = dev_net(dev);
1437	const struct net_device_ops *ops = dev->netdev_ops;
1438
1439	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1440	if (p) {
1441		p->tbl		  = tbl;
1442		atomic_set(&p->refcnt, 1);
1443		p->reachable_time =
1444				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1445		dev_hold(dev);
 
1446		p->dev = dev;
1447		write_pnet(&p->net, net);
1448		p->sysctl_table = NULL;
1449
1450		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1451			dev_put(dev);
1452			kfree(p);
1453			return NULL;
1454		}
1455
1456		write_lock_bh(&tbl->lock);
1457		list_add(&p->list, &tbl->parms.list);
1458		write_unlock_bh(&tbl->lock);
1459
1460		neigh_parms_data_state_cleanall(p);
1461	}
1462	return p;
1463}
1464EXPORT_SYMBOL(neigh_parms_alloc);
1465
1466static void neigh_rcu_free_parms(struct rcu_head *head)
1467{
1468	struct neigh_parms *parms =
1469		container_of(head, struct neigh_parms, rcu_head);
1470
1471	neigh_parms_put(parms);
1472}
1473
1474void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1475{
1476	if (!parms || parms == &tbl->parms)
1477		return;
1478	write_lock_bh(&tbl->lock);
1479	list_del(&parms->list);
1480	parms->dead = 1;
1481	write_unlock_bh(&tbl->lock);
1482	if (parms->dev)
1483		dev_put(parms->dev);
1484	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1485}
1486EXPORT_SYMBOL(neigh_parms_release);
1487
1488static void neigh_parms_destroy(struct neigh_parms *parms)
1489{
1490	kfree(parms);
1491}
1492
1493static struct lock_class_key neigh_table_proxy_queue_class;
1494
1495static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1496
1497void neigh_table_init(int index, struct neigh_table *tbl)
1498{
1499	unsigned long now = jiffies;
1500	unsigned long phsize;
1501
1502	INIT_LIST_HEAD(&tbl->parms_list);
1503	list_add(&tbl->parms.list, &tbl->parms_list);
1504	write_pnet(&tbl->parms.net, &init_net);
1505	atomic_set(&tbl->parms.refcnt, 1);
1506	tbl->parms.reachable_time =
1507			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
 
1508
1509	tbl->stats = alloc_percpu(struct neigh_statistics);
1510	if (!tbl->stats)
1511		panic("cannot create neighbour cache statistics");
1512
1513#ifdef CONFIG_PROC_FS
1514	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1515			      &neigh_stat_seq_fops, tbl))
1516		panic("cannot create neighbour proc dir entry");
1517#endif
1518
1519	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1520
1521	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1522	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1523
1524	if (!tbl->nht || !tbl->phash_buckets)
1525		panic("cannot allocate neighbour cache hashes");
1526
1527	if (!tbl->entry_size)
1528		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1529					tbl->key_len, NEIGH_PRIV_ALIGN);
1530	else
1531		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1532
1533	rwlock_init(&tbl->lock);
 
1534	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1535	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1536			tbl->parms.reachable_time);
1537	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1538	skb_queue_head_init_class(&tbl->proxy_queue,
1539			&neigh_table_proxy_queue_class);
1540
1541	tbl->last_flush = now;
1542	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1543
1544	neigh_tables[index] = tbl;
1545}
1546EXPORT_SYMBOL(neigh_table_init);
1547
1548int neigh_table_clear(int index, struct neigh_table *tbl)
1549{
1550	neigh_tables[index] = NULL;
 
 
1551	/* It is not clean... Fix it to unload IPv6 module safely */
 
1552	cancel_delayed_work_sync(&tbl->gc_work);
1553	del_timer_sync(&tbl->proxy_timer);
1554	pneigh_queue_purge(&tbl->proxy_queue);
1555	neigh_ifdown(tbl, NULL);
1556	if (atomic_read(&tbl->entries))
1557		pr_crit("neighbour leakage\n");
1558
1559	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1560		 neigh_hash_free_rcu);
1561	tbl->nht = NULL;
1562
1563	kfree(tbl->phash_buckets);
1564	tbl->phash_buckets = NULL;
1565
1566	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1567
1568	free_percpu(tbl->stats);
1569	tbl->stats = NULL;
1570
1571	return 0;
1572}
1573EXPORT_SYMBOL(neigh_table_clear);
1574
1575static struct neigh_table *neigh_find_table(int family)
1576{
1577	struct neigh_table *tbl = NULL;
1578
1579	switch (family) {
1580	case AF_INET:
1581		tbl = neigh_tables[NEIGH_ARP_TABLE];
1582		break;
1583	case AF_INET6:
1584		tbl = neigh_tables[NEIGH_ND_TABLE];
1585		break;
1586	case AF_DECnet:
1587		tbl = neigh_tables[NEIGH_DN_TABLE];
1588		break;
1589	}
1590
1591	return tbl;
1592}
1593
1594static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1595{
1596	struct net *net = sock_net(skb->sk);
1597	struct ndmsg *ndm;
1598	struct nlattr *dst_attr;
1599	struct neigh_table *tbl;
1600	struct neighbour *neigh;
1601	struct net_device *dev = NULL;
1602	int err = -EINVAL;
1603
1604	ASSERT_RTNL();
1605	if (nlmsg_len(nlh) < sizeof(*ndm))
1606		goto out;
1607
1608	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1609	if (dst_attr == NULL)
 
1610		goto out;
 
1611
1612	ndm = nlmsg_data(nlh);
1613	if (ndm->ndm_ifindex) {
1614		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1615		if (dev == NULL) {
1616			err = -ENODEV;
1617			goto out;
1618		}
1619	}
1620
1621	tbl = neigh_find_table(ndm->ndm_family);
1622	if (tbl == NULL)
1623		return -EAFNOSUPPORT;
1624
1625	if (nla_len(dst_attr) < tbl->key_len)
 
1626		goto out;
 
1627
1628	if (ndm->ndm_flags & NTF_PROXY) {
1629		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1630		goto out;
1631	}
1632
1633	if (dev == NULL)
1634		goto out;
1635
1636	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1637	if (neigh == NULL) {
1638		err = -ENOENT;
1639		goto out;
1640	}
1641
1642	err = neigh_update(neigh, NULL, NUD_FAILED,
1643			   NEIGH_UPDATE_F_OVERRIDE |
1644			   NEIGH_UPDATE_F_ADMIN);
 
1645	neigh_release(neigh);
 
 
1646
1647out:
1648	return err;
1649}
1650
1651static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 
1652{
1653	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
 
1654	struct net *net = sock_net(skb->sk);
1655	struct ndmsg *ndm;
1656	struct nlattr *tb[NDA_MAX+1];
1657	struct neigh_table *tbl;
1658	struct net_device *dev = NULL;
1659	struct neighbour *neigh;
1660	void *dst, *lladdr;
 
 
1661	int err;
1662
1663	ASSERT_RTNL();
1664	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
 
1665	if (err < 0)
1666		goto out;
1667
1668	err = -EINVAL;
1669	if (tb[NDA_DST] == NULL)
 
1670		goto out;
 
1671
1672	ndm = nlmsg_data(nlh);
1673	if (ndm->ndm_ifindex) {
1674		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1675		if (dev == NULL) {
1676			err = -ENODEV;
1677			goto out;
1678		}
1679
1680		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
 
1681			goto out;
 
1682	}
1683
1684	tbl = neigh_find_table(ndm->ndm_family);
1685	if (tbl == NULL)
1686		return -EAFNOSUPPORT;
1687
1688	if (nla_len(tb[NDA_DST]) < tbl->key_len)
 
1689		goto out;
 
 
1690	dst = nla_data(tb[NDA_DST]);
1691	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1692
1693	if (ndm->ndm_flags & NTF_PROXY) {
 
 
1694		struct pneigh_entry *pn;
1695
1696		err = -ENOBUFS;
1697		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1698		if (pn) {
1699			pn->flags = ndm->ndm_flags;
 
 
1700			err = 0;
1701		}
1702		goto out;
1703	}
1704
1705	if (dev == NULL)
 
1706		goto out;
1707
1708	neigh = neigh_lookup(tbl, dst, dev);
1709	if (neigh == NULL) {
1710		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1711			err = -ENOENT;
1712			goto out;
1713		}
1714
1715		neigh = __neigh_lookup_errno(tbl, dst, dev);
1716		if (IS_ERR(neigh)) {
1717			err = PTR_ERR(neigh);
1718			goto out;
1719		}
1720	} else {
1721		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1722			err = -EEXIST;
1723			neigh_release(neigh);
1724			goto out;
1725		}
1726
1727		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1728			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
 
1729	}
1730
1731	if (ndm->ndm_flags & NTF_USE) {
1732		neigh_event_send(neigh, NULL);
1733		err = 0;
1734	} else
1735		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1736	neigh_release(neigh);
1737
1738out:
1739	return err;
1740}
1741
1742static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1743{
1744	struct nlattr *nest;
1745
1746	nest = nla_nest_start(skb, NDTA_PARMS);
1747	if (nest == NULL)
1748		return -ENOBUFS;
1749
1750	if ((parms->dev &&
1751	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1752	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1753	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1754			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
 1755	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1756	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1757			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1758	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1759	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1760	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1761			NEIGH_VAR(parms, UCAST_PROBES)) ||
1762	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1763			NEIGH_VAR(parms, MCAST_PROBES)) ||
1764	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1765			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1766	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
 
1767	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1768			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1769	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1770			  NEIGH_VAR(parms, GC_STALETIME)) ||
1771	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1772			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1773	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1774			  NEIGH_VAR(parms, RETRANS_TIME)) ||
1775	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1776			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1777	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1778			  NEIGH_VAR(parms, PROXY_DELAY)) ||
1779	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1780			  NEIGH_VAR(parms, LOCKTIME)))
 
 
1781		goto nla_put_failure;
1782	return nla_nest_end(skb, nest);
1783
1784nla_put_failure:
1785	nla_nest_cancel(skb, nest);
1786	return -EMSGSIZE;
1787}
1788
1789static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1790			      u32 pid, u32 seq, int type, int flags)
1791{
1792	struct nlmsghdr *nlh;
1793	struct ndtmsg *ndtmsg;
1794
1795	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1796	if (nlh == NULL)
1797		return -EMSGSIZE;
1798
1799	ndtmsg = nlmsg_data(nlh);
1800
1801	read_lock_bh(&tbl->lock);
1802	ndtmsg->ndtm_family = tbl->family;
1803	ndtmsg->ndtm_pad1   = 0;
1804	ndtmsg->ndtm_pad2   = 0;
1805
1806	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1807	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1808	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1809	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1810	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
 
1811		goto nla_put_failure;
1812	{
1813		unsigned long now = jiffies;
1814		unsigned int flush_delta = now - tbl->last_flush;
1815		unsigned int rand_delta = now - tbl->last_rand;
1816		struct neigh_hash_table *nht;
1817		struct ndt_config ndc = {
1818			.ndtc_key_len		= tbl->key_len,
1819			.ndtc_entry_size	= tbl->entry_size,
1820			.ndtc_entries		= atomic_read(&tbl->entries),
1821			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1822			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1823			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1824		};
1825
1826		rcu_read_lock_bh();
1827		nht = rcu_dereference_bh(tbl->nht);
1828		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1829		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1830		rcu_read_unlock_bh();
1831
1832		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1833			goto nla_put_failure;
1834	}
1835
1836	{
1837		int cpu;
1838		struct ndt_stats ndst;
1839
1840		memset(&ndst, 0, sizeof(ndst));
1841
1842		for_each_possible_cpu(cpu) {
1843			struct neigh_statistics	*st;
1844
1845			st = per_cpu_ptr(tbl->stats, cpu);
1846			ndst.ndts_allocs		+= st->allocs;
1847			ndst.ndts_destroys		+= st->destroys;
1848			ndst.ndts_hash_grows		+= st->hash_grows;
1849			ndst.ndts_res_failed		+= st->res_failed;
1850			ndst.ndts_lookups		+= st->lookups;
1851			ndst.ndts_hits			+= st->hits;
1852			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1853			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1854			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1855			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1856			ndst.ndts_table_fulls		+= st->table_fulls;
1857		}
1858
1859		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
 
1860			goto nla_put_failure;
1861	}
1862
1863	BUG_ON(tbl->parms.dev);
1864	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1865		goto nla_put_failure;
1866
1867	read_unlock_bh(&tbl->lock);
1868	nlmsg_end(skb, nlh);
1869	return 0;
1870
1871nla_put_failure:
1872	read_unlock_bh(&tbl->lock);
1873	nlmsg_cancel(skb, nlh);
1874	return -EMSGSIZE;
1875}
1876
1877static int neightbl_fill_param_info(struct sk_buff *skb,
1878				    struct neigh_table *tbl,
1879				    struct neigh_parms *parms,
1880				    u32 pid, u32 seq, int type,
1881				    unsigned int flags)
1882{
1883	struct ndtmsg *ndtmsg;
1884	struct nlmsghdr *nlh;
1885
1886	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1887	if (nlh == NULL)
1888		return -EMSGSIZE;
1889
1890	ndtmsg = nlmsg_data(nlh);
1891
1892	read_lock_bh(&tbl->lock);
1893	ndtmsg->ndtm_family = tbl->family;
1894	ndtmsg->ndtm_pad1   = 0;
1895	ndtmsg->ndtm_pad2   = 0;
1896
1897	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1898	    neightbl_fill_parms(skb, parms) < 0)
1899		goto errout;
1900
1901	read_unlock_bh(&tbl->lock);
1902	nlmsg_end(skb, nlh);
1903	return 0;
1904errout:
1905	read_unlock_bh(&tbl->lock);
1906	nlmsg_cancel(skb, nlh);
1907	return -EMSGSIZE;
1908}
1909
1910static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1911	[NDTA_NAME]		= { .type = NLA_STRING },
1912	[NDTA_THRESH1]		= { .type = NLA_U32 },
1913	[NDTA_THRESH2]		= { .type = NLA_U32 },
1914	[NDTA_THRESH3]		= { .type = NLA_U32 },
1915	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1916	[NDTA_PARMS]		= { .type = NLA_NESTED },
1917};
1918
1919static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1920	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1921	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1922	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1923	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1924	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1925	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1926	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1927	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1928	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1929	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1930	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1931	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1932	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1933	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1934};
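/* A rough sketch of the RTM_SETNEIGHTBL message the two policies above
 * validate (attribute values are illustrative only):
 *
 *	struct ndtmsg                 - ndtm_family narrows the table search
 *	  NDTA_NAME    "arp_cache"    - required, matched against tbl->id
 *	  NDTA_THRESH1/2/3,
 *	  NDTA_GC_INTERVAL            - accepted only from the initial netns
 *	  NDTA_PARMS                  - nested NDTPA_* tunables; NDTPA_IFINDEX
 *	                                selects the per-device parms to change
 *
 * neightbl_set() below consumes these attributes.
 */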
1935
1936static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1937{
1938	struct net *net = sock_net(skb->sk);
1939	struct neigh_table *tbl;
1940	struct ndtmsg *ndtmsg;
1941	struct nlattr *tb[NDTA_MAX+1];
1942	bool found = false;
1943	int err, tidx;
1944
1945	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1946			  nl_neightbl_policy);
1947	if (err < 0)
1948		goto errout;
1949
1950	if (tb[NDTA_NAME] == NULL) {
1951		err = -EINVAL;
1952		goto errout;
1953	}
1954
1955	ndtmsg = nlmsg_data(nlh);
1956
1957	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1958		tbl = neigh_tables[tidx];
1959		if (!tbl)
1960			continue;
1961		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1962			continue;
1963		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1964			found = true;
1965			break;
1966		}
1967	}
1968
1969	if (!found)
1970		return -ENOENT;
1971
1972	/*
1973	 * We acquire tbl->lock to be nice to the periodic timers and
1974	 * make sure they always see a consistent set of values.
1975	 */
1976	write_lock_bh(&tbl->lock);
1977
1978	if (tb[NDTA_PARMS]) {
1979		struct nlattr *tbp[NDTPA_MAX+1];
1980		struct neigh_parms *p;
1981		int i, ifindex = 0;
1982
1983		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1984				       nl_ntbl_parm_policy);
1985		if (err < 0)
1986			goto errout_tbl_lock;
1987
1988		if (tbp[NDTPA_IFINDEX])
1989			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1990
1991		p = lookup_neigh_parms(tbl, net, ifindex);
1992		if (p == NULL) {
1993			err = -ENOENT;
1994			goto errout_tbl_lock;
1995		}
1996
1997		for (i = 1; i <= NDTPA_MAX; i++) {
1998			if (tbp[i] == NULL)
1999				continue;
2000
2001			switch (i) {
2002			case NDTPA_QUEUE_LEN:
2003				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2004					      nla_get_u32(tbp[i]) *
2005					      SKB_TRUESIZE(ETH_FRAME_LEN));
2006				break;
2007			case NDTPA_QUEUE_LENBYTES:
2008				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2009					      nla_get_u32(tbp[i]));
2010				break;
2011			case NDTPA_PROXY_QLEN:
2012				NEIGH_VAR_SET(p, PROXY_QLEN,
2013					      nla_get_u32(tbp[i]));
2014				break;
2015			case NDTPA_APP_PROBES:
2016				NEIGH_VAR_SET(p, APP_PROBES,
2017					      nla_get_u32(tbp[i]));
2018				break;
2019			case NDTPA_UCAST_PROBES:
2020				NEIGH_VAR_SET(p, UCAST_PROBES,
2021					      nla_get_u32(tbp[i]));
2022				break;
2023			case NDTPA_MCAST_PROBES:
2024				NEIGH_VAR_SET(p, MCAST_PROBES,
2025					      nla_get_u32(tbp[i]));
2026				break;
2027			case NDTPA_MCAST_REPROBES:
2028				NEIGH_VAR_SET(p, MCAST_REPROBES,
2029					      nla_get_u32(tbp[i]));
2030				break;
2031			case NDTPA_BASE_REACHABLE_TIME:
2032				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2033					      nla_get_msecs(tbp[i]));
2034				/* update reachable_time as well; otherwise the change will
2035				 * only take effect after the next time neigh_periodic_work
2036				 * decides to recompute it (which can be multiple minutes)
2037				 */
2038				p->reachable_time =
2039					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2040				break;
2041			case NDTPA_GC_STALETIME:
2042				NEIGH_VAR_SET(p, GC_STALETIME,
2043					      nla_get_msecs(tbp[i]));
2044				break;
2045			case NDTPA_DELAY_PROBE_TIME:
2046				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2047					      nla_get_msecs(tbp[i]));
2048				break;
2049			case NDTPA_RETRANS_TIME:
2050				NEIGH_VAR_SET(p, RETRANS_TIME,
2051					      nla_get_msecs(tbp[i]));
2052				break;
2053			case NDTPA_ANYCAST_DELAY:
2054				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055					      nla_get_msecs(tbp[i]));
2056				break;
2057			case NDTPA_PROXY_DELAY:
2058				NEIGH_VAR_SET(p, PROXY_DELAY,
2059					      nla_get_msecs(tbp[i]));
2060				break;
2061			case NDTPA_LOCKTIME:
2062				NEIGH_VAR_SET(p, LOCKTIME,
2063					      nla_get_msecs(tbp[i]));
2064				break;
2065			}
2066		}
2067	}
2068
2069	err = -ENOENT;
2070	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072	    !net_eq(net, &init_net))
2073		goto errout_tbl_lock;
2074
2075	if (tb[NDTA_THRESH1])
2076		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078	if (tb[NDTA_THRESH2])
2079		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081	if (tb[NDTA_THRESH3])
2082		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084	if (tb[NDTA_GC_INTERVAL])
2085		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087	err = 0;
2088
2089errout_tbl_lock:
2090	write_unlock_bh(&tbl->lock);
2091errout:
2092	return err;
2093}
2094
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097	struct net *net = sock_net(skb->sk);
2098	int family, tidx, nidx = 0;
2099	int tbl_skip = cb->args[0];
2100	int neigh_skip = cb->args[1];
2101	struct neigh_table *tbl;
2102
2103	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106		struct neigh_parms *p;
2107
2108		tbl = neigh_tables[tidx];
2109		if (!tbl)
2110			continue;
2111
2112		if (tidx < tbl_skip || (family && tbl->family != family))
2113			continue;
2114
2115		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117				       NLM_F_MULTI) < 0)
2118			break;
2119
2120		nidx = 0;
2121		p = list_next_entry(&tbl->parms, list);
2122		list_for_each_entry_from(p, &tbl->parms_list, list) {
2123			if (!net_eq(neigh_parms_net(p), net))
2124				continue;
2125
2126			if (nidx < neigh_skip)
2127				goto next;
2128
2129			if (neightbl_fill_param_info(skb, tbl, p,
2130						     NETLINK_CB(cb->skb).portid,
2131						     cb->nlh->nlmsg_seq,
2132						     RTM_NEWNEIGHTBL,
2133						     NLM_F_MULTI) < 0)
2134				goto out;
2135		next:
2136			nidx++;
2137		}
2138
2139		neigh_skip = 0;
2140	}
2141out:
2142	cb->args[0] = tidx;
2143	cb->args[1] = nidx;
2144
2145	return skb->len;
2146}
2147
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149			   u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151	unsigned long now = jiffies;
2152	struct nda_cacheinfo ci;
2153	struct nlmsghdr *nlh;
2154	struct ndmsg *ndm;
2155
2156	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157	if (nlh == NULL)
2158		return -EMSGSIZE;
2159
2160	ndm = nlmsg_data(nlh);
2161	ndm->ndm_family	 = neigh->ops->family;
2162	ndm->ndm_pad1    = 0;
2163	ndm->ndm_pad2    = 0;
2164	ndm->ndm_flags	 = neigh->flags;
2165	ndm->ndm_type	 = neigh->type;
2166	ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169		goto nla_put_failure;
2170
2171	read_lock_bh(&neigh->lock);
2172	ndm->ndm_state	 = neigh->nud_state;
2173	if (neigh->nud_state & NUD_VALID) {
2174		char haddr[MAX_ADDR_LEN];
2175
2176		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178			read_unlock_bh(&neigh->lock);
2179			goto nla_put_failure;
2180		}
2181	}
2182
2183	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2184	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2186	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2187	read_unlock_bh(&neigh->lock);
2188
2189	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191		goto nla_put_failure;
2192
2193	nlmsg_end(skb, nlh);
2194	return 0;
2195
2196nla_put_failure:
2197	nlmsg_cancel(skb, nlh);
2198	return -EMSGSIZE;
2199}
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202			    u32 pid, u32 seq, int type, unsigned int flags,
2203			    struct neigh_table *tbl)
2204{
2205	struct nlmsghdr *nlh;
2206	struct ndmsg *ndm;
2207
2208	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209	if (nlh == NULL)
2210		return -EMSGSIZE;
2211
2212	ndm = nlmsg_data(nlh);
2213	ndm->ndm_family	 = tbl->family;
2214	ndm->ndm_pad1    = 0;
2215	ndm->ndm_pad2    = 0;
2216	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2217	ndm->ndm_type	 = RTN_UNICAST;
2218	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219	ndm->ndm_state	 = NUD_NONE;
2220
2221	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222		goto nla_put_failure;
2223
2224	nlmsg_end(skb, nlh);
2225	return 0;
2226
2227nla_put_failure:
2228	nlmsg_cancel(skb, nlh);
2229	return -EMSGSIZE;
2230}
2231
2232static void neigh_update_notify(struct neighbour *neigh)
2233{
2234	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2235	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2236}
2237
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240	struct net_device *master;
2241
2242	if (!master_idx)
2243		return false;
2244
2245	master = netdev_master_upper_dev_get(dev);
2246	if (!master || master->ifindex != master_idx)
2247		return true;
2248
2249	return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254	if (filter_idx && dev->ifindex != filter_idx)
2255		return true;
2256
2257	return false;
2258}
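/* Both helpers above return true when a neighbour entry must be skipped:
 * a dump request may carry NDA_IFINDEX and/or NDA_MASTER, and matching
 * replies are then flagged NLM_F_DUMP_FILTERED in neigh_dump_table().
 */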
2259
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261			    struct netlink_callback *cb)
2262{
2263	struct net *net = sock_net(skb->sk);
2264	const struct nlmsghdr *nlh = cb->nlh;
2265	struct nlattr *tb[NDA_MAX + 1];
2266	struct neighbour *n;
2267	int rc, h, s_h = cb->args[1];
2268	int idx, s_idx = idx = cb->args[2];
2269	struct neigh_hash_table *nht;
2270	int filter_master_idx = 0, filter_idx = 0;
2271	unsigned int flags = NLM_F_MULTI;
2272	int err;
2273
2274	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275	if (!err) {
2276		if (tb[NDA_IFINDEX])
2277			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279		if (tb[NDA_MASTER])
2280			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282		if (filter_idx || filter_master_idx)
2283			flags |= NLM_F_DUMP_FILTERED;
2284	}
2285
2286	rcu_read_lock_bh();
2287	nht = rcu_dereference_bh(tbl->nht);
2288
2289	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290		if (h > s_h)
2291			s_idx = 0;
2292		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293		     n != NULL;
2294		     n = rcu_dereference_bh(n->next)) {
2295			if (!net_eq(dev_net(n->dev), net))
2296				continue;
2297			if (neigh_ifindex_filtered(n->dev, filter_idx))
2298				continue;
2299			if (neigh_master_filtered(n->dev, filter_master_idx))
2300				continue;
2301			if (idx < s_idx)
2302				goto next;
2303			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2304					    cb->nlh->nlmsg_seq,
2305					    RTM_NEWNEIGH,
2306					    flags) < 0) {
2307				rc = -1;
2308				goto out;
2309			}
2310next:
2311			idx++;
2312		}
2313	}
2314	rc = skb->len;
2315out:
2316	rcu_read_unlock_bh();
2317	cb->args[1] = h;
2318	cb->args[2] = idx;
2319	return rc;
2320}
2321
2322static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2323			     struct netlink_callback *cb)
2324{
2325	struct pneigh_entry *n;
2326	struct net *net = sock_net(skb->sk);
2327	int rc, h, s_h = cb->args[3];
2328	int idx, s_idx = idx = cb->args[4];
2329
2330	read_lock_bh(&tbl->lock);
2331
2332	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2333		if (h > s_h)
2334			s_idx = 0;
2335		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336			if (pneigh_net(n) != net)
2337				continue;
2338			if (idx < s_idx)
2339				goto next;
2340			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2341					    cb->nlh->nlmsg_seq,
2342					    RTM_NEWNEIGH,
2343					    NLM_F_MULTI, tbl) < 0) {
2344				read_unlock_bh(&tbl->lock);
2345				rc = -1;
2346				goto out;
2347			}
2348		next:
2349			idx++;
2350		}
2351	}
2352
2353	read_unlock_bh(&tbl->lock);
2354	rc = skb->len;
2355out:
2356	cb->args[3] = h;
2357	cb->args[4] = idx;
2358	return rc;
2359
2360}
2361
2362static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2363{
2364	struct neigh_table *tbl;
2365	int t, family, s_t;
2366	int proxy = 0;
2367	int err;
2368
2369	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2370
2371	/* check for full ndmsg structure presence, family member is
2372	 * the same for both structures
2373	 */
2374	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2375	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2376		proxy = 1;
2377
2378	s_t = cb->args[0];
2379
2380	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2381		tbl = neigh_tables[t];
2382
2383		if (!tbl)
2384			continue;
2385		if (t < s_t || (family && tbl->family != family))
2386			continue;
2387		if (t > s_t)
2388			memset(&cb->args[1], 0, sizeof(cb->args) -
2389						sizeof(cb->args[0]));
2390		if (proxy)
2391			err = pneigh_dump_table(tbl, skb, cb);
2392		else
2393			err = neigh_dump_table(tbl, skb, cb);
2394		if (err < 0)
2395			break;
2396	}
2397
2398	cb->args[0] = t;
2399	return skb->len;
2400}
2401
2402void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2403{
2404	int chain;
2405	struct neigh_hash_table *nht;
2406
2407	rcu_read_lock_bh();
2408	nht = rcu_dereference_bh(tbl->nht);
2409
2410	read_lock(&tbl->lock); /* avoid resizes */
2411	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2412		struct neighbour *n;
2413
2414		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2415		     n != NULL;
2416		     n = rcu_dereference_bh(n->next))
2417			cb(n, cookie);
2418	}
2419	read_unlock(&tbl->lock);
2420	rcu_read_unlock_bh();
2421}
2422EXPORT_SYMBOL(neigh_for_each);
2423
2424/* The tbl->lock must be held as a writer and BH disabled. */
2425void __neigh_for_each_release(struct neigh_table *tbl,
2426			      int (*cb)(struct neighbour *))
2427{
2428	int chain;
2429	struct neigh_hash_table *nht;
2430
2431	nht = rcu_dereference_protected(tbl->nht,
2432					lockdep_is_held(&tbl->lock));
2433	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2434		struct neighbour *n;
2435		struct neighbour __rcu **np;
2436
2437		np = &nht->hash_buckets[chain];
2438		while ((n = rcu_dereference_protected(*np,
2439					lockdep_is_held(&tbl->lock))) != NULL) {
2440			int release;
2441
2442			write_lock(&n->lock);
2443			release = cb(n);
2444			if (release) {
2445				rcu_assign_pointer(*np,
2446					rcu_dereference_protected(n->next,
2447						lockdep_is_held(&tbl->lock)));
2448				n->dead = 1;
2449			} else
2450				np = &n->next;
2451			write_unlock(&n->lock);
2452			if (release)
2453				neigh_cleanup_and_release(n);
2454		}
2455	}
2456}
2457EXPORT_SYMBOL(__neigh_for_each_release);
2458
2459int neigh_xmit(int index, struct net_device *dev,
2460	       const void *addr, struct sk_buff *skb)
2461{
2462	int err = -EAFNOSUPPORT;
2463	if (likely(index < NEIGH_NR_TABLES)) {
2464		struct neigh_table *tbl;
2465		struct neighbour *neigh;
2466
2467		tbl = neigh_tables[index];
2468		if (!tbl)
2469			goto out;
2470		neigh = __neigh_lookup_noref(tbl, addr, dev);
2471		if (!neigh)
2472			neigh = __neigh_create(tbl, addr, dev, false);
2473		err = PTR_ERR(neigh);
2474		if (IS_ERR(neigh))
2475			goto out_kfree_skb;
2476		err = neigh->output(neigh, skb);
2477	}
2478	else if (index == NEIGH_LINK_TABLE) {
2479		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2480				      addr, NULL, skb->len);
2481		if (err < 0)
2482			goto out_kfree_skb;
2483		err = dev_queue_xmit(skb);
2484	}
2485out:
2486	return err;
2487out_kfree_skb:
2488	kfree_skb(skb);
2489	goto out;
2490}
2491EXPORT_SYMBOL(neigh_xmit);
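/* Usage note for neigh_xmit(): callers pass NEIGH_ARP_TABLE/NEIGH_ND_TABLE
 * together with an L3 address to be resolved, or NEIGH_LINK_TABLE when
 * "addr" is already the link-layer address and only dev_hard_header() plus
 * dev_queue_xmit() are needed (MPLS, for instance, transmits through this
 * helper).
 */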
2492
2493#ifdef CONFIG_PROC_FS
2494
2495static struct neighbour *neigh_get_first(struct seq_file *seq)
2496{
2497	struct neigh_seq_state *state = seq->private;
2498	struct net *net = seq_file_net(seq);
2499	struct neigh_hash_table *nht = state->nht;
2500	struct neighbour *n = NULL;
2501	int bucket = state->bucket;
2502
2503	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2504	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2505		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2506
2507		while (n) {
2508			if (!net_eq(dev_net(n->dev), net))
2509				goto next;
2510			if (state->neigh_sub_iter) {
2511				loff_t fakep = 0;
2512				void *v;
2513
2514				v = state->neigh_sub_iter(state, n, &fakep);
2515				if (!v)
2516					goto next;
2517			}
2518			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2519				break;
2520			if (n->nud_state & ~NUD_NOARP)
2521				break;
2522next:
2523			n = rcu_dereference_bh(n->next);
2524		}
2525
2526		if (n)
2527			break;
2528	}
2529	state->bucket = bucket;
2530
2531	return n;
2532}
2533
2534static struct neighbour *neigh_get_next(struct seq_file *seq,
2535					struct neighbour *n,
2536					loff_t *pos)
2537{
2538	struct neigh_seq_state *state = seq->private;
2539	struct net *net = seq_file_net(seq);
2540	struct neigh_hash_table *nht = state->nht;
2541
2542	if (state->neigh_sub_iter) {
2543		void *v = state->neigh_sub_iter(state, n, pos);
2544		if (v)
2545			return n;
2546	}
2547	n = rcu_dereference_bh(n->next);
2548
2549	while (1) {
2550		while (n) {
2551			if (!net_eq(dev_net(n->dev), net))
2552				goto next;
2553			if (state->neigh_sub_iter) {
2554				void *v = state->neigh_sub_iter(state, n, pos);
2555				if (v)
2556					return n;
2557				goto next;
2558			}
2559			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2560				break;
2561
2562			if (n->nud_state & ~NUD_NOARP)
2563				break;
2564next:
2565			n = rcu_dereference_bh(n->next);
2566		}
2567
2568		if (n)
2569			break;
2570
2571		if (++state->bucket >= (1 << nht->hash_shift))
2572			break;
2573
2574		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2575	}
2576
2577	if (n && pos)
2578		--(*pos);
2579	return n;
2580}
2581
2582static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2583{
2584	struct neighbour *n = neigh_get_first(seq);
2585
2586	if (n) {
2587		--(*pos);
2588		while (*pos) {
2589			n = neigh_get_next(seq, n, pos);
2590			if (!n)
2591				break;
2592		}
2593	}
2594	return *pos ? NULL : n;
2595}
2596
2597static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2598{
2599	struct neigh_seq_state *state = seq->private;
2600	struct net *net = seq_file_net(seq);
2601	struct neigh_table *tbl = state->tbl;
2602	struct pneigh_entry *pn = NULL;
2603	int bucket = state->bucket;
2604
2605	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2606	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2607		pn = tbl->phash_buckets[bucket];
2608		while (pn && !net_eq(pneigh_net(pn), net))
2609			pn = pn->next;
2610		if (pn)
2611			break;
2612	}
2613	state->bucket = bucket;
2614
2615	return pn;
2616}
2617
2618static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2619					    struct pneigh_entry *pn,
2620					    loff_t *pos)
2621{
2622	struct neigh_seq_state *state = seq->private;
2623	struct net *net = seq_file_net(seq);
2624	struct neigh_table *tbl = state->tbl;
2625
2626	do {
2627		pn = pn->next;
2628	} while (pn && !net_eq(pneigh_net(pn), net));
2629
2630	while (!pn) {
2631		if (++state->bucket > PNEIGH_HASHMASK)
2632			break;
2633		pn = tbl->phash_buckets[state->bucket];
2634		while (pn && !net_eq(pneigh_net(pn), net))
2635			pn = pn->next;
2636		if (pn)
2637			break;
2638	}
2639
2640	if (pn && pos)
2641		--(*pos);
2642
2643	return pn;
2644}
2645
2646static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2647{
2648	struct pneigh_entry *pn = pneigh_get_first(seq);
2649
2650	if (pn) {
2651		--(*pos);
2652		while (*pos) {
2653			pn = pneigh_get_next(seq, pn, pos);
2654			if (!pn)
2655				break;
2656		}
2657	}
2658	return *pos ? NULL : pn;
2659}
2660
2661static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2662{
2663	struct neigh_seq_state *state = seq->private;
2664	void *rc;
2665	loff_t idxpos = *pos;
2666
2667	rc = neigh_get_idx(seq, &idxpos);
2668	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2669		rc = pneigh_get_idx(seq, &idxpos);
2670
2671	return rc;
2672}
2673
2674void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2675	__acquires(rcu_bh)
2676{
2677	struct neigh_seq_state *state = seq->private;
2678
2679	state->tbl = tbl;
2680	state->bucket = 0;
2681	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2682
2683	rcu_read_lock_bh();
2684	state->nht = rcu_dereference_bh(tbl->nht);
2685
2686	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2687}
2688EXPORT_SYMBOL(neigh_seq_start);
2689
2690void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2691{
2692	struct neigh_seq_state *state;
2693	void *rc;
2694
2695	if (v == SEQ_START_TOKEN) {
2696		rc = neigh_get_first(seq);
2697		goto out;
2698	}
2699
2700	state = seq->private;
2701	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2702		rc = neigh_get_next(seq, v, NULL);
2703		if (rc)
2704			goto out;
2705		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2706			rc = pneigh_get_first(seq);
2707	} else {
2708		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2709		rc = pneigh_get_next(seq, v, NULL);
2710	}
2711out:
2712	++(*pos);
2713	return rc;
2714}
2715EXPORT_SYMBOL(neigh_seq_next);
2716
2717void neigh_seq_stop(struct seq_file *seq, void *v)
2718	__releases(rcu_bh)
2719{
2720	rcu_read_unlock_bh();
2721}
2722EXPORT_SYMBOL(neigh_seq_stop);
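/* Protocol code wraps the three iterators above in its own seq_file ops;
 * ARP's /proc/net/arp, for instance, starts its walk with
 * neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP) so that
 * NUD_NOARP pseudo-entries stay hidden from userspace.
 */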
2723
2724/* statistics via seq_file */
2725
2726static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2727{
2728	struct neigh_table *tbl = seq->private;
2729	int cpu;
2730
2731	if (*pos == 0)
2732		return SEQ_START_TOKEN;
2733
2734	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2735		if (!cpu_possible(cpu))
2736			continue;
2737		*pos = cpu+1;
2738		return per_cpu_ptr(tbl->stats, cpu);
2739	}
2740	return NULL;
2741}
2742
2743static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2744{
2745	struct neigh_table *tbl = seq->private;
2746	int cpu;
2747
2748	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2749		if (!cpu_possible(cpu))
2750			continue;
2751		*pos = cpu+1;
2752		return per_cpu_ptr(tbl->stats, cpu);
2753	}
2754	return NULL;
2755}
2756
2757static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2758{
2759
2760}
2761
2762static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2763{
2764	struct neigh_table *tbl = seq->private;
2765	struct neigh_statistics *st = v;
2766
2767	if (v == SEQ_START_TOKEN) {
2768		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2769		return 0;
2770	}
2771
2772	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2773			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2774		   atomic_read(&tbl->entries),
2775
2776		   st->allocs,
2777		   st->destroys,
2778		   st->hash_grows,
2779
2780		   st->lookups,
2781		   st->hits,
2782
2783		   st->res_failed,
2784
2785		   st->rcv_probes_mcast,
2786		   st->rcv_probes_ucast,
2787
2788		   st->periodic_gc_runs,
2789		   st->forced_gc_runs,
2790		   st->unres_discards,
2791		   st->table_fulls
2792		   );
2793
2794	return 0;
2795}
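/* The per-table statistics file (/proc/net/stat/<tbl->id>, e.g.
 * "arp_cache") therefore contains the header line above followed by one
 * row per possible CPU; every field is hexadecimal, and only "entries"
 * is a table-wide counter repeated on each row.
 */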
2796
2797static const struct seq_operations neigh_stat_seq_ops = {
2798	.start	= neigh_stat_seq_start,
2799	.next	= neigh_stat_seq_next,
2800	.stop	= neigh_stat_seq_stop,
2801	.show	= neigh_stat_seq_show,
2802};
2803
2804static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2805{
2806	int ret = seq_open(file, &neigh_stat_seq_ops);
2807
2808	if (!ret) {
2809		struct seq_file *sf = file->private_data;
2810		sf->private = PDE_DATA(inode);
2811	}
2812	return ret;
2813};
2814
2815static const struct file_operations neigh_stat_seq_fops = {
2816	.owner	 = THIS_MODULE,
2817	.open 	 = neigh_stat_seq_open,
2818	.read	 = seq_read,
2819	.llseek	 = seq_lseek,
2820	.release = seq_release,
2821};
2822
2823#endif /* CONFIG_PROC_FS */
2824
2825static inline size_t neigh_nlmsg_size(void)
2826{
2827	return NLMSG_ALIGN(sizeof(struct ndmsg))
2828	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2829	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2830	       + nla_total_size(sizeof(struct nda_cacheinfo))
2831	       + nla_total_size(4); /* NDA_PROBES */
2832}
2833
2834static void __neigh_notify(struct neighbour *n, int type, int flags)
2835{
2836	struct net *net = dev_net(n->dev);
2837	struct sk_buff *skb;
2838	int err = -ENOBUFS;
2839
2840	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2841	if (skb == NULL)
2842		goto errout;
2843
2844	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2845	if (err < 0) {
2846		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2847		WARN_ON(err == -EMSGSIZE);
2848		kfree_skb(skb);
2849		goto errout;
2850	}
2851	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2852	return;
2853errout:
2854	if (err < 0)
2855		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2856}
2857
2858void neigh_app_ns(struct neighbour *n)
2859{
2860	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2861}
2862EXPORT_SYMBOL(neigh_app_ns);
2863
2864#ifdef CONFIG_SYSCTL
2865static int zero;
2866static int int_max = INT_MAX;
2867static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2868
2869static int proc_unres_qlen(struct ctl_table *ctl, int write,
2870			   void __user *buffer, size_t *lenp, loff_t *ppos)
2871{
2872	int size, ret;
2873	struct ctl_table tmp = *ctl;
2874
2875	tmp.extra1 = &zero;
2876	tmp.extra2 = &unres_qlen_max;
2877	tmp.data = &size;
2878
2879	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2880	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2881
2882	if (write && !ret)
2883		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2884	return ret;
2885}
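/* Sketch of the conversion handled above: the table stores
 * NEIGH_VAR(QUEUE_LEN_BYTES), while the legacy "unres_qlen" sysctl is
 * expressed in packets, so
 *
 *	bytes   = packets * SKB_TRUESIZE(ETH_FRAME_LEN)
 *	packets = bytes   / SKB_TRUESIZE(ETH_FRAME_LEN)
 *
 * with SKB_TRUESIZE(ETH_FRAME_LEN) coming to roughly 2KB on common builds
 * (the exact value depends on struct sk_buff/skb_shared_info sizes).
 */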
2886
2887static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2888						   int family)
2889{
2890	switch (family) {
2891	case AF_INET:
2892		return __in_dev_arp_parms_get_rcu(dev);
2893	case AF_INET6:
2894		return __in6_dev_nd_parms_get_rcu(dev);
2895	}
2896	return NULL;
2897}
2898
2899static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2900				  int index)
2901{
2902	struct net_device *dev;
2903	int family = neigh_parms_family(p);
2904
2905	rcu_read_lock();
2906	for_each_netdev_rcu(net, dev) {
2907		struct neigh_parms *dst_p =
2908				neigh_get_dev_parms_rcu(dev, family);
2909
2910		if (dst_p && !test_bit(index, dst_p->data_state))
2911			dst_p->data[index] = p->data[index];
2912	}
2913	rcu_read_unlock();
2914}
2915
2916static void neigh_proc_update(struct ctl_table *ctl, int write)
2917{
2918	struct net_device *dev = ctl->extra1;
2919	struct neigh_parms *p = ctl->extra2;
2920	struct net *net = neigh_parms_net(p);
2921	int index = (int *) ctl->data - p->data;
2922
2923	if (!write)
2924		return;
2925
2926	set_bit(index, p->data_state);
2927	if (!dev) /* NULL dev means this is default value */
2928		neigh_copy_dflt_parms(net, p, index);
2929}
2930
2931static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2932					   void __user *buffer,
2933					   size_t *lenp, loff_t *ppos)
2934{
2935	struct ctl_table tmp = *ctl;
2936	int ret;
2937
2938	tmp.extra1 = &zero;
2939	tmp.extra2 = &int_max;
2940
2941	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2942	neigh_proc_update(ctl, write);
2943	return ret;
2944}
2945
2946int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2947			void __user *buffer, size_t *lenp, loff_t *ppos)
2948{
2949	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2950
2951	neigh_proc_update(ctl, write);
2952	return ret;
2953}
2954EXPORT_SYMBOL(neigh_proc_dointvec);
2955
2956int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2957				void __user *buffer,
2958				size_t *lenp, loff_t *ppos)
2959{
2960	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2961
2962	neigh_proc_update(ctl, write);
2963	return ret;
2964}
2965EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2966
2967static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2968					      void __user *buffer,
2969					      size_t *lenp, loff_t *ppos)
2970{
2971	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2972
2973	neigh_proc_update(ctl, write);
2974	return ret;
2975}
2976
2977int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2978				   void __user *buffer,
2979				   size_t *lenp, loff_t *ppos)
2980{
2981	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2982
2983	neigh_proc_update(ctl, write);
2984	return ret;
2985}
2986EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2987
2988static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2989					  void __user *buffer,
2990					  size_t *lenp, loff_t *ppos)
2991{
2992	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2993
2994	neigh_proc_update(ctl, write);
2995	return ret;
2996}
2997
2998static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
2999					  void __user *buffer,
3000					  size_t *lenp, loff_t *ppos)
3001{
3002	struct neigh_parms *p = ctl->extra2;
3003	int ret;
3004
3005	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3006		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3007	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3008		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3009	else
3010		ret = -1;
3011
3012	if (write && ret == 0) {
3013		/* update reachable_time as well; otherwise the change will
3014		 * only take effect after the next time neigh_periodic_work
3015		 * decides to recompute it
3016		 */
3017		p->reachable_time =
3018			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3019	}
3020	return ret;
3021}
3022
3023#define NEIGH_PARMS_DATA_OFFSET(index)	\
3024	(&((struct neigh_parms *) 0)->data[index])
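/* NEIGH_PARMS_DATA_OFFSET() yields the offset of data[index] inside
 * struct neigh_parms, computed from a NULL base pointer much like
 * offsetof().  neigh_sysctl_register() later rebases each table entry
 * with "t->neigh_vars[i].data += (long) p", turning the offset into a
 * pointer into the per-device (or default) neigh_parms instance.
 */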
3025
3026#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3027	[NEIGH_VAR_ ## attr] = { \
3028		.procname	= name, \
3029		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3030		.maxlen		= sizeof(int), \
3031		.mode		= mval, \
3032		.proc_handler	= proc, \
3033	}
3034
3035#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3036	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3037
3038#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3039	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3040
3041#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3042	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3043
3044#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3045	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3046
3047#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3048	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3049
3050#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3051	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3052
3053static struct neigh_sysctl_table {
3054	struct ctl_table_header *sysctl_header;
3055	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3056} neigh_sysctl_template __read_mostly = {
3057	.neigh_vars = {
3058		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3059		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3060		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3061		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3062		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3063		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3064		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3065		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3066		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3067		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3068		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3069		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3070		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3071		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3072		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3073		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3074		[NEIGH_VAR_GC_INTERVAL] = {
3075			.procname	= "gc_interval",
3076			.maxlen		= sizeof(int),
3077			.mode		= 0644,
3078			.proc_handler	= proc_dointvec_jiffies,
3079		},
3080		[NEIGH_VAR_GC_THRESH1] = {
3081			.procname	= "gc_thresh1",
3082			.maxlen		= sizeof(int),
3083			.mode		= 0644,
3084			.extra1 	= &zero,
3085			.extra2		= &int_max,
3086			.proc_handler	= proc_dointvec_minmax,
3087		},
3088		[NEIGH_VAR_GC_THRESH2] = {
3089			.procname	= "gc_thresh2",
3090			.maxlen		= sizeof(int),
3091			.mode		= 0644,
3092			.extra1 	= &zero,
3093			.extra2		= &int_max,
3094			.proc_handler	= proc_dointvec_minmax,
3095		},
3096		[NEIGH_VAR_GC_THRESH3] = {
3097			.procname	= "gc_thresh3",
3098			.maxlen		= sizeof(int),
3099			.mode		= 0644,
3100			.extra1 	= &zero,
3101			.extra2		= &int_max,
3102			.proc_handler	= proc_dointvec_minmax,
3103		},
3104		{},
3105	},
3106};
3107
3108int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3109			  proc_handler *handler)
3110{
3111	int i;
3112	struct neigh_sysctl_table *t;
3113	const char *dev_name_source;
3114	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3115	char *p_name;
3116
3117	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3118	if (!t)
3119		goto err;
3120
3121	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3122		t->neigh_vars[i].data += (long) p;
3123		t->neigh_vars[i].extra1 = dev;
3124		t->neigh_vars[i].extra2 = p;
3125	}
3126
3127	if (dev) {
3128		dev_name_source = dev->name;
3129		/* Terminate the table early */
3130		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3131		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3132	} else {
3133		struct neigh_table *tbl = p->tbl;
3134		dev_name_source = "default";
3135		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3136		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3137		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3138		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3139	}
3140
3141	if (handler) {
3142		/* RetransTime */
3143		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3144		/* ReachableTime */
3145		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3146		/* RetransTime (in milliseconds)*/
3147		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3148		/* ReachableTime (in milliseconds) */
3149		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3150	} else {
3151		/* Those handlers will update p->reachable_time after
3152		 * base_reachable_time(_ms) is set to ensure the new timer starts being
3153		 * applied after the next neighbour update instead of waiting for
3154		 * neigh_periodic_work to update its value (can be multiple minutes).
3155		 * So any handler that replaces them should do this as well.
3156		 */
3157		/* ReachableTime */
3158		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3159			neigh_proc_base_reachable_time;
3160		/* ReachableTime (in milliseconds) */
3161		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3162			neigh_proc_base_reachable_time;
3163	}
3164
3165	/* Don't export sysctls to unprivileged users */
3166	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3167		t->neigh_vars[0].procname = NULL;
3168
3169	switch (neigh_parms_family(p)) {
3170	case AF_INET:
3171	      p_name = "ipv4";
3172	      break;
3173	case AF_INET6:
3174	      p_name = "ipv6";
3175	      break;
3176	default:
3177	      BUG();
3178	}
3179
3180	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3181		p_name, dev_name_source);
3182	t->sysctl_header =
3183		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3184	if (!t->sysctl_header)
3185		goto free;
3186
3187	p->sysctl_table = t;
3188	return 0;
3189
3190free:
3191	kfree(t);
3192err:
3193	return -ENOBUFS;
3194}
3195EXPORT_SYMBOL(neigh_sysctl_register);
3196
3197void neigh_sysctl_unregister(struct neigh_parms *p)
3198{
3199	if (p->sysctl_table) {
3200		struct neigh_sysctl_table *t = p->sysctl_table;
3201		p->sysctl_table = NULL;
3202		unregister_net_sysctl_table(t->sysctl_header);
3203		kfree(t);
3204	}
3205}
3206EXPORT_SYMBOL(neigh_sysctl_unregister);
3207
3208#endif	/* CONFIG_SYSCTL */
3209
3210static int __init neigh_init(void)
3211{
3212	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3213	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3214	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3215
3216	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3217		      NULL);
3218	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3219
3220	return 0;
3221}
3222
3223subsys_initcall(neigh_init);
3224
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Generic address resolution entity
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	Fixes:
  10 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  11 *	Harald Welte		Add neighbour cache statistics like rtstat
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/slab.h>
  17#include <linux/types.h>
  18#include <linux/kernel.h>
  19#include <linux/module.h>
  20#include <linux/socket.h>
  21#include <linux/netdevice.h>
  22#include <linux/proc_fs.h>
  23#ifdef CONFIG_SYSCTL
  24#include <linux/sysctl.h>
  25#endif
  26#include <linux/times.h>
  27#include <net/net_namespace.h>
  28#include <net/neighbour.h>
  29#include <net/arp.h>
  30#include <net/dst.h>
  31#include <net/sock.h>
  32#include <net/netevent.h>
  33#include <net/netlink.h>
  34#include <linux/rtnetlink.h>
  35#include <linux/random.h>
  36#include <linux/string.h>
  37#include <linux/log2.h>
  38#include <linux/inetdevice.h>
  39#include <net/addrconf.h>
  40
  41#include <trace/events/neigh.h>
  42
  43#define NEIGH_DEBUG 1
  44#define neigh_dbg(level, fmt, ...)		\
  45do {						\
  46	if (level <= NEIGH_DEBUG)		\
  47		pr_debug(fmt, ##__VA_ARGS__);	\
  48} while (0)
  49
  50#define PNEIGH_HASHMASK		0xF
  51
  52static void neigh_timer_handler(struct timer_list *t);
  53static void __neigh_notify(struct neighbour *n, int type, int flags,
  54			   u32 pid);
  55static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
  56static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
  57				    struct net_device *dev);
  58
  59#ifdef CONFIG_PROC_FS
  60static const struct seq_operations neigh_stat_seq_ops;
  61#endif
  62
  63static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family)
  64{
  65	int i;
  66
  67	switch (family) {
  68	default:
  69		DEBUG_NET_WARN_ON_ONCE(1);
  70		fallthrough; /* to avoid panic by null-ptr-deref */
  71	case AF_INET:
  72		i = NEIGH_ARP_TABLE;
  73		break;
  74	case AF_INET6:
  75		i = NEIGH_ND_TABLE;
  76		break;
  77	}
  78
  79	return &dev->neighbours[i];
  80}
  81
  82/*
  83   Neighbour hash table buckets are protected with rwlock tbl->lock.
  84
  85   - All the scans/updates to hash buckets MUST be made under this lock.
  86   - NOTHING clever should be made under this lock: no callbacks
  87     to protocol backends, no attempts to send something to network.
  88     It will result in deadlocks, if backend/driver wants to use neighbour
  89     cache.
  90   - If the entry requires some non-trivial actions, increase
  91     its reference count and release table lock.
  92
  93   Neighbour entries are protected:
  94   - with reference count.
  95   - with rwlock neigh->lock
  96
  97   Reference count prevents destruction.
  98
  99   neigh->lock mainly serializes ll address data and its validity state.
 100   However, the same lock is used to protect another entry fields:
 101    - timer
 102    - resolution queue
 103
 104   Again, nothing clever shall be made under neigh->lock,
 105   the most complicated procedure, which we allow is dev->hard_header.
 106   It is supposed, that dev->hard_header is simplistic and does
 107   not make callbacks to neighbour tables.
 108 */
 109
 110static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
 111{
 112	kfree_skb(skb);
 113	return -ENETDOWN;
 114}
 115
 116static void neigh_cleanup_and_release(struct neighbour *neigh)
 117{
 118	trace_neigh_cleanup_and_release(neigh, 0);
 119	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
 120	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 121	neigh_release(neigh);
 122}
 123
 124/*
 125 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 126 * It corresponds to the default IPv6 settings and is not overridable,
 127 * because it is a really reasonable choice.
 128 */
 129
 130unsigned long neigh_rand_reach_time(unsigned long base)
 131{
 132	return base ? get_random_u32_below(base) + (base >> 1) : 0;
 133}
 134EXPORT_SYMBOL(neigh_rand_reach_time);
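/* Worked example: with base = 30 * HZ the result is uniform over
 * [15 * HZ, 45 * HZ), i.e. get_random_u32_below(base) in [0, base)
 * shifted up by base/2, matching the (1/2)*base...(3/2)*base interval
 * described above.
 */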
 135
 136static void neigh_mark_dead(struct neighbour *n)
 137{
 138	n->dead = 1;
 139	if (!list_empty(&n->gc_list)) {
 140		list_del_init(&n->gc_list);
 141		atomic_dec(&n->tbl->gc_entries);
 142	}
 143	if (!list_empty(&n->managed_list))
 144		list_del_init(&n->managed_list);
 145}
 146
 147static void neigh_update_gc_list(struct neighbour *n)
 148{
 149	bool on_gc_list, exempt_from_gc;
 150
 151	write_lock_bh(&n->tbl->lock);
 152	write_lock(&n->lock);
 153	if (n->dead)
 154		goto out;
 155
 156	/* remove from the gc list if new state is permanent or if neighbor
 157	 * is externally learned; otherwise entry should be on the gc list
 158	 */
 159	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
 160			 n->flags & NTF_EXT_LEARNED;
 161	on_gc_list = !list_empty(&n->gc_list);
 162
 163	if (exempt_from_gc && on_gc_list) {
 164		list_del_init(&n->gc_list);
 165		atomic_dec(&n->tbl->gc_entries);
 166	} else if (!exempt_from_gc && !on_gc_list) {
 167		/* add entries to the tail; cleaning removes from the front */
 168		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 169		atomic_inc(&n->tbl->gc_entries);
 170	}
 171out:
 172	write_unlock(&n->lock);
 173	write_unlock_bh(&n->tbl->lock);
 174}
 175
 176static void neigh_update_managed_list(struct neighbour *n)
 177{
 178	bool on_managed_list, add_to_managed;
 179
 180	write_lock_bh(&n->tbl->lock);
 181	write_lock(&n->lock);
 182	if (n->dead)
 183		goto out;
 184
 185	add_to_managed = n->flags & NTF_MANAGED;
 186	on_managed_list = !list_empty(&n->managed_list);
 187
 188	if (!add_to_managed && on_managed_list)
 189		list_del_init(&n->managed_list);
 190	else if (add_to_managed && !on_managed_list)
 191		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 192out:
 193	write_unlock(&n->lock);
 194	write_unlock_bh(&n->tbl->lock);
 195}
 196
 197static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
 198			       bool *gc_update, bool *managed_update)
 199{
 200	u32 ndm_flags, old_flags = neigh->flags;
 201
 202	if (!(flags & NEIGH_UPDATE_F_ADMIN))
 203		return;
 204
 205	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
 206	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
 207
 208	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
 209		if (ndm_flags & NTF_EXT_LEARNED)
 210			neigh->flags |= NTF_EXT_LEARNED;
 211		else
 212			neigh->flags &= ~NTF_EXT_LEARNED;
 213		*notify = 1;
 214		*gc_update = true;
 215	}
 216	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
 217		if (ndm_flags & NTF_MANAGED)
 218			neigh->flags |= NTF_MANAGED;
 219		else
 220			neigh->flags &= ~NTF_MANAGED;
 221		*notify = 1;
 222		*managed_update = true;
 223	}
 224}
 225
 226bool neigh_remove_one(struct neighbour *n)
 227{
 228	bool retval = false;
 229
 230	write_lock(&n->lock);
 231	if (refcount_read(&n->refcnt) == 1) {
 232		hlist_del_rcu(&n->hash);
 233		hlist_del_rcu(&n->dev_list);
 234		neigh_mark_dead(n);
 235		retval = true;
 236	}
 237	write_unlock(&n->lock);
 238	if (retval)
 239		neigh_cleanup_and_release(n);
 240	return retval;
 241}
 242
 243static int neigh_forced_gc(struct neigh_table *tbl)
 244{
 245	int max_clean = atomic_read(&tbl->gc_entries) -
 246			READ_ONCE(tbl->gc_thresh2);
 247	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
 248	unsigned long tref = jiffies - 5 * HZ;
 249	struct neighbour *n, *tmp;
 250	int shrunk = 0;
 251	int loop = 0;
 252
 253	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 254
 255	write_lock_bh(&tbl->lock);
 256
 257	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
 258		if (refcount_read(&n->refcnt) == 1) {
 259			bool remove = false;
 260
 261			write_lock(&n->lock);
 262			if ((n->nud_state == NUD_FAILED) ||
 263			    (n->nud_state == NUD_NOARP) ||
 264			    (tbl->is_multicast &&
 265			     tbl->is_multicast(n->primary_key)) ||
 266			    !time_in_range(n->updated, tref, jiffies))
 267				remove = true;
 268			write_unlock(&n->lock);
 269
 270			if (remove && neigh_remove_one(n))
 271				shrunk++;
 272			if (shrunk >= max_clean)
 273				break;
 274			if (++loop == 16) {
 275				if (ktime_get_ns() > tmax)
 276					goto unlock;
 277				loop = 0;
 278			}
 279		}
 280	}
 281
 282	WRITE_ONCE(tbl->last_flush, jiffies);
 283unlock:
 284	write_unlock_bh(&tbl->lock);
 285
 286	return shrunk;
 287}
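/* Summary of the policy above: gc_list is walked from the front (oldest
 * entries, since new ones are added at the tail) and unreferenced entries
 * that are NUD_FAILED, NUD_NOARP, multicast, or not updated within the
 * last 5 seconds are dropped, stopping after (gc_entries - gc_thresh2)
 * removals or roughly one millisecond of work.
 */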
 288
 289static void neigh_add_timer(struct neighbour *n, unsigned long when)
 290{
 291	/* Keep a safe distance from the jiffies - LONG_MAX point while the
 292	 * timer is running in DELAY/PROBE state, but still show user space
 293	 * large times in the past.
 294	 */
 295	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
 296
 297	neigh_hold(n);
 298	if (!time_in_range(n->confirmed, mint, jiffies))
 299		n->confirmed = mint;
 300	if (time_before(n->used, n->confirmed))
 301		n->used = n->confirmed;
 302	if (unlikely(mod_timer(&n->timer, when))) {
 303		printk("NEIGH: BUG, double timer add, state is %x\n",
 304		       n->nud_state);
 305		dump_stack();
 306	}
 307}
 308
 309static int neigh_del_timer(struct neighbour *n)
 310{
 311	if ((n->nud_state & NUD_IN_TIMER) &&
 312	    del_timer(&n->timer)) {
 313		neigh_release(n);
 314		return 1;
 315	}
 316	return 0;
 317}
 318
 319static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
 320						   int family)
 321{
 322	switch (family) {
 323	case AF_INET:
 324		return __in_dev_arp_parms_get_rcu(dev);
 325	case AF_INET6:
 326		return __in6_dev_nd_parms_get_rcu(dev);
 327	}
 328	return NULL;
 329}
 330
 331static void neigh_parms_qlen_dec(struct net_device *dev, int family)
 332{
 333	struct neigh_parms *p;
 334
 335	rcu_read_lock();
 336	p = neigh_get_dev_parms_rcu(dev, family);
 337	if (p)
 338		p->qlen--;
 339	rcu_read_unlock();
 340}
 341
 342static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
 343			       int family)
 344{
 345	struct sk_buff_head tmp;
 346	unsigned long flags;
 347	struct sk_buff *skb;
 348
 349	skb_queue_head_init(&tmp);
 350	spin_lock_irqsave(&list->lock, flags);
 351	skb = skb_peek(list);
 352	while (skb != NULL) {
 353		struct sk_buff *skb_next = skb_peek_next(skb, list);
 354		struct net_device *dev = skb->dev;
 355
 356		if (net == NULL || net_eq(dev_net(dev), net)) {
 357			neigh_parms_qlen_dec(dev, family);
 358			__skb_unlink(skb, list);
 359			__skb_queue_tail(&tmp, skb);
 360		}
 361		skb = skb_next;
 362	}
 363	spin_unlock_irqrestore(&list->lock, flags);
 364
 365	while ((skb = __skb_dequeue(&tmp))) {
 366		dev_put(skb->dev);
 367		kfree_skb(skb);
 368	}
 369}
 370
 371static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
 372			    bool skip_perm)
 373{
 374	struct hlist_head *dev_head;
 375	struct hlist_node *tmp;
 376	struct neighbour *n;
 377
 378	dev_head = neigh_get_dev_table(dev, tbl->family);
 379
 380	hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
 381		if (skip_perm && n->nud_state & NUD_PERMANENT)
 382			continue;
 383
 384		hlist_del_rcu(&n->hash);
 385		hlist_del_rcu(&n->dev_list);
 386		write_lock(&n->lock);
 387		neigh_del_timer(n);
 388		neigh_mark_dead(n);
 389		if (refcount_read(&n->refcnt) != 1) {
 390			/* The most unpleasant situation.
 391			 * We must destroy neighbour entry,
 392			 * but someone still uses it.
 393			 *
 394			 * The destroy will be delayed until
 395			 * the last user releases us, but
 396			 * we must kill timers etc. and move
 397			 * it to safe state.
 398			 */
 399			__skb_queue_purge(&n->arp_queue);
 400			n->arp_queue_len_bytes = 0;
 401			WRITE_ONCE(n->output, neigh_blackhole);
 402			if (n->nud_state & NUD_VALID)
 403				n->nud_state = NUD_NOARP;
 404			else
 405				n->nud_state = NUD_NONE;
 406			neigh_dbg(2, "neigh %p is stray\n", n);
 407		}
 408		write_unlock(&n->lock);
 409		neigh_cleanup_and_release(n);
 410	}
 411}
 412
 413void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 414{
 415	write_lock_bh(&tbl->lock);
 416	neigh_flush_dev(tbl, dev, false);
 417	write_unlock_bh(&tbl->lock);
 418}
 419EXPORT_SYMBOL(neigh_changeaddr);
 420
 421static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 422			  bool skip_perm)
 423{
 424	write_lock_bh(&tbl->lock);
 425	neigh_flush_dev(tbl, dev, skip_perm);
 426	pneigh_ifdown_and_unlock(tbl, dev);
 427	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
 428			   tbl->family);
 429	if (skb_queue_empty_lockless(&tbl->proxy_queue))
 430		del_timer_sync(&tbl->proxy_timer);
 431	return 0;
 432}
 433
 434int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
 435{
 436	__neigh_ifdown(tbl, dev, true);
 437	return 0;
 438}
 439EXPORT_SYMBOL(neigh_carrier_down);
 440
 441int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 442{
 443	__neigh_ifdown(tbl, dev, false);
 444	return 0;
 445}
 446EXPORT_SYMBOL(neigh_ifdown);
 447
 448static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 449				     struct net_device *dev,
 450				     u32 flags, bool exempt_from_gc)
 451{
 452	struct neighbour *n = NULL;
 453	unsigned long now = jiffies;
 454	int entries, gc_thresh3;
 455
 456	if (exempt_from_gc)
 457		goto do_alloc;
 458
 459	entries = atomic_inc_return(&tbl->gc_entries) - 1;
 460	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
 461	if (entries >= gc_thresh3 ||
 462	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
 463	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
 464		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
 465			net_info_ratelimited("%s: neighbor table overflow!\n",
 466					     tbl->id);
 467			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 468			goto out_entries;
 469		}
 470	}
 471
 472do_alloc:
 473	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 474	if (!n)
 475		goto out_entries;
 476
 477	__skb_queue_head_init(&n->arp_queue);
 478	rwlock_init(&n->lock);
 479	seqlock_init(&n->ha_lock);
 480	n->updated	  = n->used = now;
 481	n->nud_state	  = NUD_NONE;
 482	n->output	  = neigh_blackhole;
 483	n->flags	  = flags;
 484	seqlock_init(&n->hh.hh_lock);
 485	n->parms	  = neigh_parms_clone(&tbl->parms);
 486	timer_setup(&n->timer, neigh_timer_handler, 0);
 487
 488	NEIGH_CACHE_STAT_INC(tbl, allocs);
 489	n->tbl		  = tbl;
 490	refcount_set(&n->refcnt, 1);
 491	n->dead		  = 1;
 492	INIT_LIST_HEAD(&n->gc_list);
 493	INIT_LIST_HEAD(&n->managed_list);
 494
 495	atomic_inc(&tbl->entries);
 496out:
 497	return n;
 498
 499out_entries:
 500	if (!exempt_from_gc)
 501		atomic_dec(&tbl->gc_entries);
 502	goto out;
 503}
 504
 505static void neigh_get_hash_rnd(u32 *x)
 506{
 507	*x = get_random_u32() | 1;
 508}
 509
 510static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 511{
 512	size_t size = (1 << shift) * sizeof(struct hlist_head);
 513	struct hlist_head *hash_heads;
 514	struct neigh_hash_table *ret;
 515	int i;
 516
 517	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 518	if (!ret)
 519		return NULL;
 520
 521	hash_heads = kvzalloc(size, GFP_ATOMIC);
 522	if (!hash_heads) {
 523		kfree(ret);
 524		return NULL;
 525	}
 526	ret->hash_heads = hash_heads;
 527	ret->hash_shift = shift;
 528	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 529		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 530	return ret;
 531}
 532
 533static void neigh_hash_free_rcu(struct rcu_head *head)
 534{
 535	struct neigh_hash_table *nht = container_of(head,
 536						    struct neigh_hash_table,
 537						    rcu);
 538
 539	kvfree(nht->hash_heads);
 540	kfree(nht);
 541}
 542
 543static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 544						unsigned long new_shift)
 545{
 546	unsigned int i, hash;
 547	struct neigh_hash_table *new_nht, *old_nht;
 548
 549	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 550
 551	old_nht = rcu_dereference_protected(tbl->nht,
 552					    lockdep_is_held(&tbl->lock));
 553	new_nht = neigh_hash_alloc(new_shift);
 554	if (!new_nht)
 555		return old_nht;
 556
 557	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 558		struct hlist_node *tmp;
 559		struct neighbour *n;
 560
 561		neigh_for_each_in_bucket_safe(n, tmp, &old_nht->hash_heads[i]) {
 562			hash = tbl->hash(n->primary_key, n->dev,
 563					 new_nht->hash_rnd);
 564
 565			hash >>= (32 - new_nht->hash_shift);
 566
 567			hlist_del_rcu(&n->hash);
 568			hlist_add_head_rcu(&n->hash, &new_nht->hash_heads[hash]);
 569		}
 570	}
 571
 572	rcu_assign_pointer(tbl->nht, new_nht);
 573	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 574	return new_nht;
 575}
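/* Note on the rehash above: neigh_hash_alloc() picks fresh hash_rnd[]
 * values for the new table, so every entry is re-bucketed with
 * tbl->hash(); lockless readers keep walking the old buckets under RCU
 * until neigh_hash_free_rcu() releases them.
 */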
 576
 577struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 578			       struct net_device *dev)
 579{
 580	struct neighbour *n;
 581
 582	NEIGH_CACHE_STAT_INC(tbl, lookups);
 583
 584	rcu_read_lock();
 585	n = __neigh_lookup_noref(tbl, pkey, dev);
 586	if (n) {
 587		if (!refcount_inc_not_zero(&n->refcnt))
 588			n = NULL;
 589		NEIGH_CACHE_STAT_INC(tbl, hits);
 590	}
 591
 592	rcu_read_unlock();
 593	return n;
 594}
 595EXPORT_SYMBOL(neigh_lookup);
 596
 597static struct neighbour *
 598___neigh_create(struct neigh_table *tbl, const void *pkey,
 599		struct net_device *dev, u32 flags,
 600		bool exempt_from_gc, bool want_ref)
 601{
 602	u32 hash_val, key_len = tbl->key_len;
 603	struct neighbour *n1, *rc, *n;
 604	struct neigh_hash_table *nht;
 605	int error;
 606
 607	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
 608	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
 609	if (!n) {
 610		rc = ERR_PTR(-ENOBUFS);
 611		goto out;
 612	}
 613
 614	memcpy(n->primary_key, pkey, key_len);
 615	n->dev = dev;
 616	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
 617
 618	/* Protocol specific setup. */
 619	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 620		rc = ERR_PTR(error);
 621		goto out_neigh_release;
 622	}
 623
 624	if (dev->netdev_ops->ndo_neigh_construct) {
 625		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 626		if (error < 0) {
 627			rc = ERR_PTR(error);
 628			goto out_neigh_release;
 629		}
 630	}
 631
 632	/* Device specific setup. */
 633	if (n->parms->neigh_setup &&
 634	    (error = n->parms->neigh_setup(n)) < 0) {
 635		rc = ERR_PTR(error);
 636		goto out_neigh_release;
 637	}
 638
 639	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 640
 641	write_lock_bh(&tbl->lock);
 642	nht = rcu_dereference_protected(tbl->nht,
 643					lockdep_is_held(&tbl->lock));
 644
 645	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 646		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 647
 648	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 649
 650	if (n->parms->dead) {
 651		rc = ERR_PTR(-EINVAL);
 652		goto out_tbl_unlock;
 653	}
 654
 655	neigh_for_each_in_bucket(n1, &nht->hash_heads[hash_val]) {
 656		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 657			if (want_ref)
 658				neigh_hold(n1);
 659			rc = n1;
 660			goto out_tbl_unlock;
 661		}
 662	}
 663
 664	n->dead = 0;
 665	if (!exempt_from_gc)
 666		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 667	if (n->flags & NTF_MANAGED)
 668		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 669	if (want_ref)
 670		neigh_hold(n);
 671	hlist_add_head_rcu(&n->hash, &nht->hash_heads[hash_val]);
 672
 673	hlist_add_head_rcu(&n->dev_list,
 674			   neigh_get_dev_table(dev, tbl->family));
 675
 676	write_unlock_bh(&tbl->lock);
 677	neigh_dbg(2, "neigh %p is created\n", n);
 678	rc = n;
 679out:
 680	return rc;
 681out_tbl_unlock:
 682	write_unlock_bh(&tbl->lock);
 683out_neigh_release:
 684	if (!exempt_from_gc)
 685		atomic_dec(&tbl->gc_entries);
 686	neigh_release(n);
 687	goto out;
 688}
 689
 690struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 691				 struct net_device *dev, bool want_ref)
 692{
 693	bool exempt_from_gc = !!(dev->flags & IFF_LOOPBACK);
 694
 695	return ___neigh_create(tbl, pkey, dev, 0, exempt_from_gc, want_ref);
 696}
 697EXPORT_SYMBOL(__neigh_create);
 698
 699static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 700{
 701	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 702	hash_val ^= (hash_val >> 16);
 703	hash_val ^= hash_val >> 8;
 704	hash_val ^= hash_val >> 4;
 705	hash_val &= PNEIGH_HASHMASK;
 706	return hash_val;
 707}
 708
 709static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 710					      struct net *net,
 711					      const void *pkey,
 712					      unsigned int key_len,
 713					      struct net_device *dev)
 714{
 715	while (n) {
 716		if (!memcmp(n->key, pkey, key_len) &&
 717		    net_eq(pneigh_net(n), net) &&
 718		    (n->dev == dev || !n->dev))
 719			return n;
 720		n = n->next;
 721	}
 722	return NULL;
 723}
 724
 725struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 726		struct net *net, const void *pkey, struct net_device *dev)
 727{
 728	unsigned int key_len = tbl->key_len;
 729	u32 hash_val = pneigh_hash(pkey, key_len);
 730
 731	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 732				 net, pkey, key_len, dev);
 733}
 734EXPORT_SYMBOL_GPL(__pneigh_lookup);
 735
 736struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 737				    struct net *net, const void *pkey,
 738				    struct net_device *dev, int creat)
 739{
 740	struct pneigh_entry *n;
 741	unsigned int key_len = tbl->key_len;
 742	u32 hash_val = pneigh_hash(pkey, key_len);
 743
 744	read_lock_bh(&tbl->lock);
 745	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 746			      net, pkey, key_len, dev);
 747	read_unlock_bh(&tbl->lock);
 748
 749	if (n || !creat)
 750		goto out;
 751
 752	ASSERT_RTNL();
 753
 754	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
 755	if (!n)
 756		goto out;
 757
 758	write_pnet(&n->net, net);
 759	memcpy(n->key, pkey, key_len);
 760	n->dev = dev;
 761	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
 762
 763	if (tbl->pconstructor && tbl->pconstructor(n)) {
 764		netdev_put(dev, &n->dev_tracker);
 765		kfree(n);
 766		n = NULL;
 767		goto out;
 768	}
 769
 770	write_lock_bh(&tbl->lock);
 771	n->next = tbl->phash_buckets[hash_val];
 772	tbl->phash_buckets[hash_val] = n;
 773	write_unlock_bh(&tbl->lock);
 774out:
 775	return n;
 776}
 777EXPORT_SYMBOL(pneigh_lookup);
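/*
 * Illustrative sketch (not part of this file): pneigh_lookup() with
 * creat != 0 may allocate a new proxy entry and therefore asserts the RTNL
 * lock (ASSERT_RTNL() above), while a pure lookup (creat == 0) only needs
 * the table's read lock.  A hypothetical caller creating a proxy entry
 * would look roughly like:
 *
 *	rtnl_lock();
 *	pn = pneigh_lookup(tbl, net, &addr, dev, 1);
 *	rtnl_unlock();
 */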
 778
 779
 780int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 781		  struct net_device *dev)
 782{
 783	struct pneigh_entry *n, **np;
 784	unsigned int key_len = tbl->key_len;
 785	u32 hash_val = pneigh_hash(pkey, key_len);
 786
 787	write_lock_bh(&tbl->lock);
 788	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 789	     np = &n->next) {
 790		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 791		    net_eq(pneigh_net(n), net)) {
 792			*np = n->next;
 793			write_unlock_bh(&tbl->lock);
 794			if (tbl->pdestructor)
 795				tbl->pdestructor(n);
 796			netdev_put(n->dev, &n->dev_tracker);
 797			kfree(n);
 798			return 0;
 799		}
 800	}
 801	write_unlock_bh(&tbl->lock);
 802	return -ENOENT;
 803}
 804
 805static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 806				    struct net_device *dev)
 807{
 808	struct pneigh_entry *n, **np, *freelist = NULL;
 809	u32 h;
 810
 811	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 812		np = &tbl->phash_buckets[h];
 813		while ((n = *np) != NULL) {
 814			if (!dev || n->dev == dev) {
 815				*np = n->next;
 816				n->next = freelist;
 817				freelist = n;
 818				continue;
 819			}
 820			np = &n->next;
 821		}
 822	}
 823	write_unlock_bh(&tbl->lock);
 824	while ((n = freelist)) {
 825		freelist = n->next;
 826		n->next = NULL;
 827		if (tbl->pdestructor)
 828			tbl->pdestructor(n);
 829		netdev_put(n->dev, &n->dev_tracker);
 830		kfree(n);
 831	}
 832	return -ENOENT;
 833}
 834
 835static void neigh_parms_destroy(struct neigh_parms *parms);
 836
 837static inline void neigh_parms_put(struct neigh_parms *parms)
 838{
 839	if (refcount_dec_and_test(&parms->refcnt))
 840		neigh_parms_destroy(parms);
 841}
 842
 843/*
 844 *	The neighbour must already have been removed from the table.
 845 *
 846 */
 847void neigh_destroy(struct neighbour *neigh)
 848{
 849	struct net_device *dev = neigh->dev;
 850
 851	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 852
 853	if (!neigh->dead) {
 854		pr_warn("Destroying alive neighbour %p\n", neigh);
 855		dump_stack();
 856		return;
 857	}
 858
 859	if (neigh_del_timer(neigh))
 860		pr_warn("Impossible event\n");
 861
 862	write_lock_bh(&neigh->lock);
 863	__skb_queue_purge(&neigh->arp_queue);
 864	write_unlock_bh(&neigh->lock);
 865	neigh->arp_queue_len_bytes = 0;
 866
 867	if (dev->netdev_ops->ndo_neigh_destroy)
 868		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 869
 870	netdev_put(dev, &neigh->dev_tracker);
 871	neigh_parms_put(neigh->parms);
 872
 873	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 874
 875	atomic_dec(&neigh->tbl->entries);
 876	kfree_rcu(neigh, rcu);
 877}
 878EXPORT_SYMBOL(neigh_destroy);
 879
 880/* Neighbour state is suspicious;
 881   disable fast path.
 882
 883   Called with write_locked neigh.
 884 */
 885static void neigh_suspect(struct neighbour *neigh)
 886{
 887	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 888
 889	WRITE_ONCE(neigh->output, neigh->ops->output);
 890}
 891
 892/* Neighbour state is OK;
 893   enable fast path.
 894
 895   Called with write_locked neigh.
 896 */
 897static void neigh_connect(struct neighbour *neigh)
 898{
 899	neigh_dbg(2, "neigh %p is connected\n", neigh);
 900
 901	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
 902}
 903
 904static void neigh_periodic_work(struct work_struct *work)
 905{
 906	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 907	struct neigh_hash_table *nht;
 908	struct hlist_node *tmp;
 909	struct neighbour *n;
 910	unsigned int i;
 911
 912	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 913
 914	write_lock_bh(&tbl->lock);
 915	nht = rcu_dereference_protected(tbl->nht,
 916					lockdep_is_held(&tbl->lock));
 917
 918	/*
 919	 *	periodically recompute ReachableTime from random function
 920	 */
 921
 922	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 923		struct neigh_parms *p;
 924
 925		WRITE_ONCE(tbl->last_rand, jiffies);
 926		list_for_each_entry(p, &tbl->parms_list, list)
 927			p->reachable_time =
 928				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 929	}
 930
 931	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
 932		goto out;
 933
 934	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 935		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i]) {
 936			unsigned int state;
 937
 938			write_lock(&n->lock);
 939
 940			state = n->nud_state;
 941			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
 942			    (n->flags & NTF_EXT_LEARNED)) {
 943				write_unlock(&n->lock);
 944				continue;
 945			}
 946
 947			if (time_before(n->used, n->confirmed) &&
 948			    time_is_before_eq_jiffies(n->confirmed))
 949				n->used = n->confirmed;
 950
 951			if (refcount_read(&n->refcnt) == 1 &&
 952			    (state == NUD_FAILED ||
 953			     !time_in_range_open(jiffies, n->used,
 954						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 955				hlist_del_rcu(&n->hash);
 956				hlist_del_rcu(&n->dev_list);
 957				neigh_mark_dead(n);
 958				write_unlock(&n->lock);
 959				neigh_cleanup_and_release(n);
 960				continue;
 961			}
 962			write_unlock(&n->lock);
 963		}
 964		/*
 965		 * It's fine to release lock here, even if hash table
 966		 * grows while we are preempted.
 967		 */
 968		write_unlock_bh(&tbl->lock);
 969		cond_resched();
 970		write_lock_bh(&tbl->lock);
 971		nht = rcu_dereference_protected(tbl->nht,
 972						lockdep_is_held(&tbl->lock));
 973	}
 974out:
 975	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 976	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 977	 * BASE_REACHABLE_TIME.
 978	 */
 979	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 980			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 981	write_unlock_bh(&tbl->lock);
 982}
 983
 984static __inline__ int neigh_max_probes(struct neighbour *n)
 985{
 986	struct neigh_parms *p = n->parms;
 987	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 988	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 989	        NEIGH_VAR(p, MCAST_PROBES));
 990}
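/*
 * Worked example (illustrative, assuming the stock IPv4 ARP defaults of
 * ucast_solicit = 3, app_solicit = 0, mcast_solicit = 3, mcast_resolicit = 0):
 * an entry that is not in NUD_PROBE may send up to 3 + 0 + 3 = 6 probes,
 * while an entry already in NUD_PROBE is limited to 3 + 0 + 0 = 3.
 */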
 991
 992static void neigh_invalidate(struct neighbour *neigh)
 993	__releases(neigh->lock)
 994	__acquires(neigh->lock)
 995{
 996	struct sk_buff *skb;
 997
 998	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 999	neigh_dbg(2, "neigh %p is failed\n", neigh);
1000	neigh->updated = jiffies;
1001
1002	/* This is a delicate spot. report_unreachable() is a complicated
1003	   routine; in particular, it can hit this very same neighbour entry!
1004
1005	   So we take care here to avoid an endless loop. --ANK
1006	 */
1007	while (neigh->nud_state == NUD_FAILED &&
1008	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1009		write_unlock(&neigh->lock);
1010		neigh->ops->error_report(neigh, skb);
1011		write_lock(&neigh->lock);
1012	}
1013	__skb_queue_purge(&neigh->arp_queue);
1014	neigh->arp_queue_len_bytes = 0;
1015}
1016
1017static void neigh_probe(struct neighbour *neigh)
1018	__releases(neigh->lock)
1019{
1020	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1021	/* keep skb alive even if arp_queue overflows */
1022	if (skb)
1023		skb = skb_clone(skb, GFP_ATOMIC);
1024	write_unlock(&neigh->lock);
1025	if (neigh->ops->solicit)
1026		neigh->ops->solicit(neigh, skb);
1027	atomic_inc(&neigh->probes);
1028	consume_skb(skb);
1029}
1030
1031/* Called when a timer expires for a neighbour entry. */
1032
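/*
 * State transitions driven by this timer (summary of the code below):
 *   NUD_REACHABLE -> NUD_DELAY  once confirmation is older than
 *                               reachable_time but the entry was used
 *                               within DELAY_PROBE_TIME,
 *                 -> NUD_STALE  otherwise;
 *   NUD_DELAY     -> NUD_REACHABLE if confirmed within DELAY_PROBE_TIME,
 *                 -> NUD_PROBE  otherwise;
 *   NUD_PROBE / NUD_INCOMPLETE -> NUD_FAILED once neigh_max_probes()
 *                               probes have gone unanswered.
 */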
1033static void neigh_timer_handler(struct timer_list *t)
1034{
1035	unsigned long now, next;
1036	struct neighbour *neigh = from_timer(neigh, t, timer);
1037	unsigned int state;
1038	int notify = 0;
1039
1040	write_lock(&neigh->lock);
1041
1042	state = neigh->nud_state;
1043	now = jiffies;
1044	next = now + HZ;
1045
1046	if (!(state & NUD_IN_TIMER))
1047		goto out;
1048
1049	if (state & NUD_REACHABLE) {
1050		if (time_before_eq(now,
1051				   neigh->confirmed + neigh->parms->reachable_time)) {
1052			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1053			next = neigh->confirmed + neigh->parms->reachable_time;
1054		} else if (time_before_eq(now,
1055					  neigh->used +
1056					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1057			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1058			WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1059			neigh->updated = jiffies;
1060			neigh_suspect(neigh);
1061			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1062		} else {
1063			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1064			WRITE_ONCE(neigh->nud_state, NUD_STALE);
1065			neigh->updated = jiffies;
1066			neigh_suspect(neigh);
1067			notify = 1;
1068		}
1069	} else if (state & NUD_DELAY) {
1070		if (time_before_eq(now,
1071				   neigh->confirmed +
1072				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1073			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1074			WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1075			neigh->updated = jiffies;
1076			neigh_connect(neigh);
1077			notify = 1;
1078			next = neigh->confirmed + neigh->parms->reachable_time;
1079		} else {
1080			neigh_dbg(2, "neigh %p is probed\n", neigh);
1081			WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1082			neigh->updated = jiffies;
1083			atomic_set(&neigh->probes, 0);
1084			notify = 1;
1085			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1086					 HZ/100);
1087		}
1088	} else {
1089		/* NUD_PROBE|NUD_INCOMPLETE */
1090		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1091	}
1092
1093	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1094	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1095		WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1096		notify = 1;
1097		neigh_invalidate(neigh);
1098		goto out;
1099	}
1100
1101	if (neigh->nud_state & NUD_IN_TIMER) {
1102		if (time_before(next, jiffies + HZ/100))
1103			next = jiffies + HZ/100;
1104		if (!mod_timer(&neigh->timer, next))
1105			neigh_hold(neigh);
1106	}
1107	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1108		neigh_probe(neigh);
1109	} else {
1110out:
1111		write_unlock(&neigh->lock);
1112	}
1113
1114	if (notify)
1115		neigh_update_notify(neigh, 0);
1116
1117	trace_neigh_timer_handler(neigh, 0);
1118
1119	neigh_release(neigh);
1120}
1121
1122int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1123		       const bool immediate_ok)
1124{
1125	int rc;
1126	bool immediate_probe = false;
1127
1128	write_lock_bh(&neigh->lock);
1129
1130	rc = 0;
1131	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1132		goto out_unlock_bh;
1133	if (neigh->dead)
1134		goto out_dead;
1135
1136	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1137		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1138		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1139			unsigned long next, now = jiffies;
1140
1141			atomic_set(&neigh->probes,
1142				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1143			neigh_del_timer(neigh);
1144			WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1145			neigh->updated = now;
1146			if (!immediate_ok) {
1147				next = now + 1;
1148			} else {
1149				immediate_probe = true;
1150				next = now + max(NEIGH_VAR(neigh->parms,
1151							   RETRANS_TIME),
1152						 HZ / 100);
1153			}
1154			neigh_add_timer(neigh, next);
1155		} else {
1156			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1157			neigh->updated = jiffies;
1158			write_unlock_bh(&neigh->lock);
1159
1160			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1161			return 1;
1162		}
1163	} else if (neigh->nud_state & NUD_STALE) {
1164		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1165		neigh_del_timer(neigh);
1166		WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1167		neigh->updated = jiffies;
1168		neigh_add_timer(neigh, jiffies +
1169				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1170	}
1171
1172	if (neigh->nud_state == NUD_INCOMPLETE) {
1173		if (skb) {
1174			while (neigh->arp_queue_len_bytes + skb->truesize >
1175			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1176				struct sk_buff *buff;
1177
1178				buff = __skb_dequeue(&neigh->arp_queue);
1179				if (!buff)
1180					break;
1181				neigh->arp_queue_len_bytes -= buff->truesize;
1182				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1183				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1184			}
1185			skb_dst_force(skb);
1186			__skb_queue_tail(&neigh->arp_queue, skb);
1187			neigh->arp_queue_len_bytes += skb->truesize;
1188		}
1189		rc = 1;
1190	}
1191out_unlock_bh:
1192	if (immediate_probe)
1193		neigh_probe(neigh);
1194	else
1195		write_unlock(&neigh->lock);
1196	local_bh_enable();
1197	trace_neigh_event_send_done(neigh, rc);
1198	return rc;
1199
1200out_dead:
1201	if (neigh->nud_state & NUD_STALE)
1202		goto out_unlock_bh;
1203	write_unlock_bh(&neigh->lock);
1204	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1205	trace_neigh_event_send_dead(neigh, 1);
1206	return 1;
1207}
1208EXPORT_SYMBOL(__neigh_event_send);
1209
1210static void neigh_update_hhs(struct neighbour *neigh)
1211{
1212	struct hh_cache *hh;
1213	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1214		= NULL;
1215
1216	if (neigh->dev->header_ops)
1217		update = neigh->dev->header_ops->cache_update;
1218
1219	if (update) {
1220		hh = &neigh->hh;
1221		if (READ_ONCE(hh->hh_len)) {
1222			write_seqlock_bh(&hh->hh_lock);
1223			update(hh, neigh->dev, neigh->ha);
1224			write_sequnlock_bh(&hh->hh_lock);
1225		}
1226	}
1227}
1228
1229/* Generic update routine.
1230   -- lladdr is the new lladdr, or NULL if none is supplied.
1231   -- new    is the new state.
1232   -- flags
1233	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1234				if it is different.
1235	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1236				lladdr instead of overriding it
1237				if it is different.
1238	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1239	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
1240	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
1241	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1242				NTF_ROUTER flag.
1243	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known as
1244				a router.
1245
1246   The caller MUST hold a reference count on the entry.
1247 */
1248static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1249			  u8 new, u32 flags, u32 nlmsg_pid,
1250			  struct netlink_ext_ack *extack)
1251{
1252	bool gc_update = false, managed_update = false;
1253	int update_isrouter = 0;
1254	struct net_device *dev;
1255	int err, notify = 0;
1256	u8 old;
1257
1258	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1259
1260	write_lock_bh(&neigh->lock);
1261
1262	dev    = neigh->dev;
1263	old    = neigh->nud_state;
1264	err    = -EPERM;
1265
1266	if (neigh->dead) {
1267		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1268		new = old;
1269		goto out;
1270	}
1271	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1272	    (old & (NUD_NOARP | NUD_PERMANENT)))
1273		goto out;
1274
1275	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1276	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1277		new = old & ~NUD_PERMANENT;
1278		WRITE_ONCE(neigh->nud_state, new);
1279		err = 0;
1280		goto out;
1281	}
1282
1283	if (!(new & NUD_VALID)) {
1284		neigh_del_timer(neigh);
1285		if (old & NUD_CONNECTED)
1286			neigh_suspect(neigh);
1287		WRITE_ONCE(neigh->nud_state, new);
1288		err = 0;
1289		notify = old & NUD_VALID;
1290		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1291		    (new & NUD_FAILED)) {
1292			neigh_invalidate(neigh);
1293			notify = 1;
1294		}
1295		goto out;
1296	}
1297
1298	/* Compare new lladdr with cached one */
1299	if (!dev->addr_len) {
1300		/* First case: device needs no address. */
1301		lladdr = neigh->ha;
1302	} else if (lladdr) {
1303		/* The second case: if something is already cached
1304		   and a new address is proposed:
1305		   - compare new & old
1306		   - if they are different, check override flag
1307		 */
1308		if ((old & NUD_VALID) &&
1309		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1310			lladdr = neigh->ha;
1311	} else {
1312		/* No address is supplied; if we know something,
1313		   use it, otherwise discard the request.
1314		 */
1315		err = -EINVAL;
1316		if (!(old & NUD_VALID)) {
1317			NL_SET_ERR_MSG(extack, "No link layer address given");
1318			goto out;
1319		}
1320		lladdr = neigh->ha;
1321	}
1322
1323	/* Update the neighbour entry's confirmed timestamp when we receive
1324	 * an ARP packet, even if it doesn't change the IP-to-MAC binding.
1325	 */
1326	if (new & NUD_CONNECTED)
1327		neigh->confirmed = jiffies;
1328
1329	/* If the entry was valid and the address is unchanged,
1330	   do not change the entry state if the new one is STALE.
1331	 */
1332	err = 0;
1333	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1334	if (old & NUD_VALID) {
1335		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1336			update_isrouter = 0;
1337			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1338			    (old & NUD_CONNECTED)) {
1339				lladdr = neigh->ha;
1340				new = NUD_STALE;
1341			} else
1342				goto out;
1343		} else {
1344			if (lladdr == neigh->ha && new == NUD_STALE &&
1345			    !(flags & NEIGH_UPDATE_F_ADMIN))
1346				new = old;
1347		}
1348	}
1349
1350	/* Update the timestamp only once we know we will make a change to
1351	 * the neighbour entry. Otherwise we risk moving the locktime window
1352	 * with no-op updates and ignoring relevant ARP updates.
1353	 */
1354	if (new != old || lladdr != neigh->ha)
1355		neigh->updated = jiffies;
1356
1357	if (new != old) {
1358		neigh_del_timer(neigh);
1359		if (new & NUD_PROBE)
1360			atomic_set(&neigh->probes, 0);
1361		if (new & NUD_IN_TIMER)
1362			neigh_add_timer(neigh, (jiffies +
1363						((new & NUD_REACHABLE) ?
1364						 neigh->parms->reachable_time :
1365						 0)));
1366		WRITE_ONCE(neigh->nud_state, new);
1367		notify = 1;
1368	}
1369
1370	if (lladdr != neigh->ha) {
1371		write_seqlock(&neigh->ha_lock);
1372		memcpy(&neigh->ha, lladdr, dev->addr_len);
1373		write_sequnlock(&neigh->ha_lock);
1374		neigh_update_hhs(neigh);
1375		if (!(new & NUD_CONNECTED))
1376			neigh->confirmed = jiffies -
1377				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1378		notify = 1;
1379	}
1380	if (new == old)
1381		goto out;
1382	if (new & NUD_CONNECTED)
1383		neigh_connect(neigh);
1384	else
1385		neigh_suspect(neigh);
1386	if (!(old & NUD_VALID)) {
1387		struct sk_buff *skb;
1388
1389		/* Again: avoid dead loop if something went wrong */
1390
1391		while (neigh->nud_state & NUD_VALID &&
1392		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1393			struct dst_entry *dst = skb_dst(skb);
1394			struct neighbour *n2, *n1 = neigh;
1395			write_unlock_bh(&neigh->lock);
1396
1397			rcu_read_lock();
1398
1399			/* Why not just use 'neigh' as-is?  The problem is that
1400			 * things such as shaper, eql, and sch_teql can end up
1401			 * using alternative, different, neigh objects to output
1402			 * the packet in the output path.  So what we need to do
1403			 * here is re-lookup the top-level neigh in the path so
1404			 * we can reinject the packet there.
1405			 */
1406			n2 = NULL;
1407			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1408				n2 = dst_neigh_lookup_skb(dst, skb);
1409				if (n2)
1410					n1 = n2;
1411			}
1412			READ_ONCE(n1->output)(n1, skb);
1413			if (n2)
1414				neigh_release(n2);
1415			rcu_read_unlock();
1416
1417			write_lock_bh(&neigh->lock);
1418		}
1419		__skb_queue_purge(&neigh->arp_queue);
1420		neigh->arp_queue_len_bytes = 0;
1421	}
1422out:
1423	if (update_isrouter)
1424		neigh_update_is_router(neigh, flags, &notify);
1425	write_unlock_bh(&neigh->lock);
1426	if (((new ^ old) & NUD_PERMANENT) || gc_update)
1427		neigh_update_gc_list(neigh);
1428	if (managed_update)
1429		neigh_update_managed_list(neigh);
1430	if (notify)
1431		neigh_update_notify(neigh, nlmsg_pid);
1432	trace_neigh_update_done(neigh, err);
1433	return err;
1434}
1435
1436int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1437		 u32 flags, u32 nlmsg_pid)
1438{
1439	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1440}
1441EXPORT_SYMBOL(neigh_update);
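/*
 * Illustrative sketch (not part of this file): an administrative update,
 * as issued for example by the RTM_DELNEIGH handler further below, marks
 * an entry failed regardless of its current state:
 *
 *	neigh_update(neigh, NULL, NUD_FAILED,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 */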
1442
1443/* Update the neigh to listen temporarily for probe responses, even if it is
1444 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1445 */
1446void __neigh_set_probe_once(struct neighbour *neigh)
1447{
1448	if (neigh->dead)
1449		return;
1450	neigh->updated = jiffies;
1451	if (!(neigh->nud_state & NUD_FAILED))
1452		return;
1453	WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1454	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1455	neigh_add_timer(neigh,
1456			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1457				      HZ/100));
1458}
1459EXPORT_SYMBOL(__neigh_set_probe_once);
1460
1461struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1462				 u8 *lladdr, void *saddr,
1463				 struct net_device *dev)
1464{
1465	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1466						 lladdr || !dev->addr_len);
1467	if (neigh)
1468		neigh_update(neigh, lladdr, NUD_STALE,
1469			     NEIGH_UPDATE_F_OVERRIDE, 0);
1470	return neigh;
1471}
1472EXPORT_SYMBOL(neigh_event_ns);
1473
1474/* called with read_lock_bh(&n->lock); */
1475static void neigh_hh_init(struct neighbour *n)
1476{
1477	struct net_device *dev = n->dev;
1478	__be16 prot = n->tbl->protocol;
1479	struct hh_cache	*hh = &n->hh;
1480
1481	write_lock_bh(&n->lock);
1482
1483	/* Only one thread can come in here and initialize the
1484	 * hh_cache entry.
1485	 */
1486	if (!hh->hh_len)
1487		dev->header_ops->cache(n, hh, prot);
1488
1489	write_unlock_bh(&n->lock);
1490}
1491
1492/* Slow and careful. */
1493
1494int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1495{
1496	int rc = 0;
1497
1498	if (!neigh_event_send(neigh, skb)) {
1499		int err;
1500		struct net_device *dev = neigh->dev;
1501		unsigned int seq;
1502
1503		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1504			neigh_hh_init(neigh);
1505
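		/* Snapshot the link-layer address consistently: retry the copy
		 * if a concurrent neigh_update() rewrote neigh->ha under
		 * ha_lock while we were building the header.
		 */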
1506		do {
1507			__skb_pull(skb, skb_network_offset(skb));
1508			seq = read_seqbegin(&neigh->ha_lock);
1509			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1510					      neigh->ha, NULL, skb->len);
1511		} while (read_seqretry(&neigh->ha_lock, seq));
1512
1513		if (err >= 0)
1514			rc = dev_queue_xmit(skb);
1515		else
1516			goto out_kfree_skb;
1517	}
1518out:
1519	return rc;
1520out_kfree_skb:
1521	rc = -EINVAL;
1522	kfree_skb(skb);
1523	goto out;
1524}
1525EXPORT_SYMBOL(neigh_resolve_output);
1526
1527/* As fast as possible without hh cache */
1528
1529int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1530{
1531	struct net_device *dev = neigh->dev;
1532	unsigned int seq;
1533	int err;
1534
1535	do {
1536		__skb_pull(skb, skb_network_offset(skb));
1537		seq = read_seqbegin(&neigh->ha_lock);
1538		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1539				      neigh->ha, NULL, skb->len);
1540	} while (read_seqretry(&neigh->ha_lock, seq));
1541
1542	if (err >= 0)
1543		err = dev_queue_xmit(skb);
1544	else {
1545		err = -EINVAL;
1546		kfree_skb(skb);
1547	}
1548	return err;
1549}
1550EXPORT_SYMBOL(neigh_connected_output);
1551
1552int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1553{
1554	return dev_queue_xmit(skb);
1555}
1556EXPORT_SYMBOL(neigh_direct_output);
1557
1558static void neigh_managed_work(struct work_struct *work)
1559{
1560	struct neigh_table *tbl = container_of(work, struct neigh_table,
1561					       managed_work.work);
1562	struct neighbour *neigh;
1563
1564	write_lock_bh(&tbl->lock);
1565	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1566		neigh_event_send_probe(neigh, NULL, false);
1567	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1568			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1569	write_unlock_bh(&tbl->lock);
1570}
1571
1572static void neigh_proxy_process(struct timer_list *t)
1573{
1574	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1575	long sched_next = 0;
1576	unsigned long now = jiffies;
1577	struct sk_buff *skb, *n;
1578
1579	spin_lock(&tbl->proxy_queue.lock);
1580
1581	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1582		long tdif = NEIGH_CB(skb)->sched_next - now;
1583
1584		if (tdif <= 0) {
1585			struct net_device *dev = skb->dev;
1586
1587			neigh_parms_qlen_dec(dev, tbl->family);
1588			__skb_unlink(skb, &tbl->proxy_queue);
1589
1590			if (tbl->proxy_redo && netif_running(dev)) {
1591				rcu_read_lock();
1592				tbl->proxy_redo(skb);
1593				rcu_read_unlock();
1594			} else {
1595				kfree_skb(skb);
1596			}
1597
1598			dev_put(dev);
1599		} else if (!sched_next || tdif < sched_next)
1600			sched_next = tdif;
1601	}
1602	del_timer(&tbl->proxy_timer);
1603	if (sched_next)
1604		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1605	spin_unlock(&tbl->proxy_queue.lock);
1606}
1607
1608static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1609{
1610	/* If proxy_delay is zero, do not call get_random_u32_below()
1611	 * as it is undefined behavior.
1612	 */
1613	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1614
1615	return proxy_delay ?
1616	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
1617}
1618
1619void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1620		    struct sk_buff *skb)
1621{
1622	unsigned long sched_next = neigh_proxy_delay(p);
1623
1624	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1625		kfree_skb(skb);
1626		return;
1627	}
1628
1629	NEIGH_CB(skb)->sched_next = sched_next;
1630	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1631
1632	spin_lock(&tbl->proxy_queue.lock);
1633	if (del_timer(&tbl->proxy_timer)) {
1634		if (time_before(tbl->proxy_timer.expires, sched_next))
1635			sched_next = tbl->proxy_timer.expires;
1636	}
1637	skb_dst_drop(skb);
1638	dev_hold(skb->dev);
1639	__skb_queue_tail(&tbl->proxy_queue, skb);
1640	p->qlen++;
1641	mod_timer(&tbl->proxy_timer, sched_next);
1642	spin_unlock(&tbl->proxy_queue.lock);
1643}
1644EXPORT_SYMBOL(pneigh_enqueue);
1645
1646static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1647						      struct net *net, int ifindex)
1648{
1649	struct neigh_parms *p;
1650
1651	list_for_each_entry(p, &tbl->parms_list, list) {
1652		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1653		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1654			return p;
1655	}
1656
1657	return NULL;
1658}
1659
1660struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1661				      struct neigh_table *tbl)
1662{
1663	struct neigh_parms *p;
1664	struct net *net = dev_net(dev);
1665	const struct net_device_ops *ops = dev->netdev_ops;
1666
1667	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1668	if (p) {
1669		p->tbl		  = tbl;
1670		refcount_set(&p->refcnt, 1);
1671		p->reachable_time =
1672				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1673		p->qlen = 0;
1674		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1675		p->dev = dev;
1676		write_pnet(&p->net, net);
1677		p->sysctl_table = NULL;
1678
1679		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1680			netdev_put(dev, &p->dev_tracker);
1681			kfree(p);
1682			return NULL;
1683		}
1684
1685		write_lock_bh(&tbl->lock);
1686		list_add(&p->list, &tbl->parms.list);
1687		write_unlock_bh(&tbl->lock);
1688
1689		neigh_parms_data_state_cleanall(p);
1690	}
1691	return p;
1692}
1693EXPORT_SYMBOL(neigh_parms_alloc);
1694
1695static void neigh_rcu_free_parms(struct rcu_head *head)
1696{
1697	struct neigh_parms *parms =
1698		container_of(head, struct neigh_parms, rcu_head);
1699
1700	neigh_parms_put(parms);
1701}
1702
1703void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1704{
1705	if (!parms || parms == &tbl->parms)
1706		return;
1707	write_lock_bh(&tbl->lock);
1708	list_del(&parms->list);
1709	parms->dead = 1;
1710	write_unlock_bh(&tbl->lock);
1711	netdev_put(parms->dev, &parms->dev_tracker);
1712	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1713}
1714EXPORT_SYMBOL(neigh_parms_release);
1715
1716static void neigh_parms_destroy(struct neigh_parms *parms)
1717{
1718	kfree(parms);
1719}
1720
1721static struct lock_class_key neigh_table_proxy_queue_class;
1722
1723static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724
1725void neigh_table_init(int index, struct neigh_table *tbl)
1726{
1727	unsigned long now = jiffies;
1728	unsigned long phsize;
1729
1730	INIT_LIST_HEAD(&tbl->parms_list);
1731	INIT_LIST_HEAD(&tbl->gc_list);
1732	INIT_LIST_HEAD(&tbl->managed_list);
1733
1734	list_add(&tbl->parms.list, &tbl->parms_list);
1735	write_pnet(&tbl->parms.net, &init_net);
1736	refcount_set(&tbl->parms.refcnt, 1);
1737	tbl->parms.reachable_time =
1738			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1739	tbl->parms.qlen = 0;
1740
1741	tbl->stats = alloc_percpu(struct neigh_statistics);
1742	if (!tbl->stats)
1743		panic("cannot create neighbour cache statistics");
1744
1745#ifdef CONFIG_PROC_FS
1746	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1747			      &neigh_stat_seq_ops, tbl))
1748		panic("cannot create neighbour proc dir entry");
1749#endif
1750
1751	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1752
1753	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1754	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1755
1756	if (!tbl->nht || !tbl->phash_buckets)
1757		panic("cannot allocate neighbour cache hashes");
1758
1759	if (!tbl->entry_size)
1760		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1761					tbl->key_len, NEIGH_PRIV_ALIGN);
1762	else
1763		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1764
1765	rwlock_init(&tbl->lock);
1766
1767	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1768	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1769			tbl->parms.reachable_time);
1770	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1771	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1772
1773	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1774	skb_queue_head_init_class(&tbl->proxy_queue,
1775			&neigh_table_proxy_queue_class);
1776
1777	tbl->last_flush = now;
1778	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1779
1780	rcu_assign_pointer(neigh_tables[index], tbl);
1781}
1782EXPORT_SYMBOL(neigh_table_init);
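/*
 * Illustrative sketch (not part of this file): a protocol registers its
 * table once at init time; IPv4 ARP, for example, does roughly
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * after filling in arp_tbl's family, key_len, hash, constructor, etc.
 */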
1783
1784/*
1785 * Only called from ndisc_cleanup(), which means this is dead code
1786 * because the IPv6 module can no longer be unloaded.
1787 */
1788int neigh_table_clear(int index, struct neigh_table *tbl)
1789{
1790	RCU_INIT_POINTER(neigh_tables[index], NULL);
1791	synchronize_rcu();
1792
1793	/* It is not clean... Fix it to unload IPv6 module safely */
1794	cancel_delayed_work_sync(&tbl->managed_work);
1795	cancel_delayed_work_sync(&tbl->gc_work);
1796	del_timer_sync(&tbl->proxy_timer);
1797	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1798	neigh_ifdown(tbl, NULL);
1799	if (atomic_read(&tbl->entries))
1800		pr_crit("neighbour leakage\n");
1801
1802	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1803		 neigh_hash_free_rcu);
1804	tbl->nht = NULL;
1805
1806	kfree(tbl->phash_buckets);
1807	tbl->phash_buckets = NULL;
1808
1809	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1810
1811	free_percpu(tbl->stats);
1812	tbl->stats = NULL;
1813
1814	return 0;
1815}
1816EXPORT_SYMBOL(neigh_table_clear);
1817
1818static struct neigh_table *neigh_find_table(int family)
1819{
1820	struct neigh_table *tbl = NULL;
1821
1822	switch (family) {
1823	case AF_INET:
1824		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ARP_TABLE]);
1825		break;
1826	case AF_INET6:
1827		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ND_TABLE]);
1828		break;
1829	}
1830
1831	return tbl;
1832}
1833
1834const struct nla_policy nda_policy[NDA_MAX+1] = {
1835	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1836	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1837	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1838	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1839	[NDA_PROBES]		= { .type = NLA_U32 },
1840	[NDA_VLAN]		= { .type = NLA_U16 },
1841	[NDA_PORT]		= { .type = NLA_U16 },
1842	[NDA_VNI]		= { .type = NLA_U32 },
1843	[NDA_IFINDEX]		= { .type = NLA_U32 },
1844	[NDA_MASTER]		= { .type = NLA_U32 },
1845	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1846	[NDA_NH_ID]		= { .type = NLA_U32 },
1847	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1848	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1849};
1850
1851static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1852			struct netlink_ext_ack *extack)
1853{
1854	struct net *net = sock_net(skb->sk);
1855	struct ndmsg *ndm;
1856	struct nlattr *dst_attr;
1857	struct neigh_table *tbl;
1858	struct neighbour *neigh;
1859	struct net_device *dev = NULL;
1860	int err = -EINVAL;
1861
1862	ASSERT_RTNL();
1863	if (nlmsg_len(nlh) < sizeof(*ndm))
1864		goto out;
1865
1866	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1867	if (!dst_attr) {
1868		NL_SET_ERR_MSG(extack, "Network address not specified");
1869		goto out;
1870	}
1871
1872	ndm = nlmsg_data(nlh);
1873	if (ndm->ndm_ifindex) {
1874		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1875		if (dev == NULL) {
1876			err = -ENODEV;
1877			goto out;
1878		}
1879	}
1880
1881	tbl = neigh_find_table(ndm->ndm_family);
1882	if (tbl == NULL)
1883		return -EAFNOSUPPORT;
1884
1885	if (nla_len(dst_attr) < (int)tbl->key_len) {
1886		NL_SET_ERR_MSG(extack, "Invalid network address");
1887		goto out;
1888	}
1889
1890	if (ndm->ndm_flags & NTF_PROXY) {
1891		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1892		goto out;
1893	}
1894
1895	if (dev == NULL)
1896		goto out;
1897
1898	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1899	if (neigh == NULL) {
1900		err = -ENOENT;
1901		goto out;
1902	}
1903
1904	err = __neigh_update(neigh, NULL, NUD_FAILED,
1905			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1906			     NETLINK_CB(skb).portid, extack);
1907	write_lock_bh(&tbl->lock);
1908	neigh_release(neigh);
1909	neigh_remove_one(neigh);
1910	write_unlock_bh(&tbl->lock);
1911
1912out:
1913	return err;
1914}
1915
1916static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1917		     struct netlink_ext_ack *extack)
1918{
1919	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1920		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1921	struct net *net = sock_net(skb->sk);
1922	struct ndmsg *ndm;
1923	struct nlattr *tb[NDA_MAX+1];
1924	struct neigh_table *tbl;
1925	struct net_device *dev = NULL;
1926	struct neighbour *neigh;
1927	void *dst, *lladdr;
1928	u8 protocol = 0;
1929	u32 ndm_flags;
1930	int err;
1931
1932	ASSERT_RTNL();
1933	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1934				     nda_policy, extack);
1935	if (err < 0)
1936		goto out;
1937
1938	err = -EINVAL;
1939	if (!tb[NDA_DST]) {
1940		NL_SET_ERR_MSG(extack, "Network address not specified");
1941		goto out;
1942	}
1943
1944	ndm = nlmsg_data(nlh);
1945	ndm_flags = ndm->ndm_flags;
1946	if (tb[NDA_FLAGS_EXT]) {
1947		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1948
1949		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1950			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1951			      hweight32(NTF_EXT_MASK)));
1952		ndm_flags |= (ext << NTF_EXT_SHIFT);
1953	}
1954	if (ndm->ndm_ifindex) {
1955		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1956		if (dev == NULL) {
1957			err = -ENODEV;
1958			goto out;
1959		}
1960
1961		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1962			NL_SET_ERR_MSG(extack, "Invalid link address");
1963			goto out;
1964		}
1965	}
1966
1967	tbl = neigh_find_table(ndm->ndm_family);
1968	if (tbl == NULL)
1969		return -EAFNOSUPPORT;
1970
1971	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1972		NL_SET_ERR_MSG(extack, "Invalid network address");
1973		goto out;
1974	}
1975
1976	dst = nla_data(tb[NDA_DST]);
1977	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1978
1979	if (tb[NDA_PROTOCOL])
1980		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1981	if (ndm_flags & NTF_PROXY) {
1982		struct pneigh_entry *pn;
1983
1984		if (ndm_flags & NTF_MANAGED) {
1985			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
1986			goto out;
1987		}
1988
1989		err = -ENOBUFS;
1990		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1991		if (pn) {
1992			pn->flags = ndm_flags;
1993			if (protocol)
1994				pn->protocol = protocol;
1995			err = 0;
1996		}
1997		goto out;
1998	}
1999
2000	if (!dev) {
2001		NL_SET_ERR_MSG(extack, "Device not specified");
2002		goto out;
2003	}
2004
2005	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2006		err = -EINVAL;
2007		goto out;
2008	}
2009
2010	neigh = neigh_lookup(tbl, dst, dev);
2011	if (neigh == NULL) {
2012		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
2013		bool exempt_from_gc = ndm_permanent ||
2014				      ndm_flags & NTF_EXT_LEARNED;
2015
2016		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2017			err = -ENOENT;
2018			goto out;
2019		}
2020		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2021			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2022			err = -EINVAL;
2023			goto out;
2024		}
2025
2026		neigh = ___neigh_create(tbl, dst, dev,
2027					ndm_flags &
2028					(NTF_EXT_LEARNED | NTF_MANAGED),
2029					exempt_from_gc, true);
2030		if (IS_ERR(neigh)) {
2031			err = PTR_ERR(neigh);
2032			goto out;
2033		}
2034	} else {
2035		if (nlh->nlmsg_flags & NLM_F_EXCL) {
2036			err = -EEXIST;
2037			neigh_release(neigh);
2038			goto out;
2039		}
2040
2041		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2042			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2043				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2044	}
2045
2046	if (protocol)
2047		neigh->protocol = protocol;
2048	if (ndm_flags & NTF_EXT_LEARNED)
2049		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2050	if (ndm_flags & NTF_ROUTER)
2051		flags |= NEIGH_UPDATE_F_ISROUTER;
2052	if (ndm_flags & NTF_MANAGED)
2053		flags |= NEIGH_UPDATE_F_MANAGED;
2054	if (ndm_flags & NTF_USE)
2055		flags |= NEIGH_UPDATE_F_USE;
2056
2057	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2058			     NETLINK_CB(skb).portid, extack);
2059	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2060		neigh_event_send(neigh, NULL);
2061		err = 0;
2062	}
2063	neigh_release(neigh);
2064out:
2065	return err;
2066}
2067
2068static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2069{
2070	struct nlattr *nest;
2071
2072	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2073	if (nest == NULL)
2074		return -ENOBUFS;
2075
2076	if ((parms->dev &&
2077	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2078	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2079	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2080			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2081	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2082	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2083			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2084	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2085	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2086	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2087			NEIGH_VAR(parms, UCAST_PROBES)) ||
2088	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2089			NEIGH_VAR(parms, MCAST_PROBES)) ||
2090	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2091			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2092	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2093			  NDTPA_PAD) ||
2094	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2095			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2096	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2097			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2098	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2099			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2100	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2101			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2102	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2103			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2104	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2105			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2106	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2107			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2108	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2109			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2110		goto nla_put_failure;
2111	return nla_nest_end(skb, nest);
2112
2113nla_put_failure:
2114	nla_nest_cancel(skb, nest);
2115	return -EMSGSIZE;
2116}
2117
2118static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2119			      u32 pid, u32 seq, int type, int flags)
2120{
2121	struct nlmsghdr *nlh;
2122	struct ndtmsg *ndtmsg;
2123
2124	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2125	if (nlh == NULL)
2126		return -EMSGSIZE;
2127
2128	ndtmsg = nlmsg_data(nlh);
2129
2130	read_lock_bh(&tbl->lock);
2131	ndtmsg->ndtm_family = tbl->family;
2132	ndtmsg->ndtm_pad1   = 0;
2133	ndtmsg->ndtm_pad2   = 0;
2134
2135	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2136	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2137			  NDTA_PAD) ||
2138	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2139	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2140	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2141		goto nla_put_failure;
2142	{
2143		unsigned long now = jiffies;
2144		long flush_delta = now - READ_ONCE(tbl->last_flush);
2145		long rand_delta = now - READ_ONCE(tbl->last_rand);
2146		struct neigh_hash_table *nht;
2147		struct ndt_config ndc = {
2148			.ndtc_key_len		= tbl->key_len,
2149			.ndtc_entry_size	= tbl->entry_size,
2150			.ndtc_entries		= atomic_read(&tbl->entries),
2151			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2152			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2153			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
2154		};
2155
2156		rcu_read_lock();
2157		nht = rcu_dereference(tbl->nht);
2158		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2159		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2160		rcu_read_unlock();
2161
2162		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2163			goto nla_put_failure;
2164	}
2165
2166	{
2167		int cpu;
2168		struct ndt_stats ndst;
2169
2170		memset(&ndst, 0, sizeof(ndst));
2171
2172		for_each_possible_cpu(cpu) {
2173			struct neigh_statistics	*st;
2174
2175			st = per_cpu_ptr(tbl->stats, cpu);
2176			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
2177			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
2178			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
2179			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
2180			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
2181			ndst.ndts_hits			+= READ_ONCE(st->hits);
2182			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
2183			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
2184			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
2185			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
2186			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
2187		}
2188
2189		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2190				  NDTA_PAD))
2191			goto nla_put_failure;
2192	}
2193
2194	BUG_ON(tbl->parms.dev);
2195	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2196		goto nla_put_failure;
2197
2198	read_unlock_bh(&tbl->lock);
2199	nlmsg_end(skb, nlh);
2200	return 0;
2201
2202nla_put_failure:
2203	read_unlock_bh(&tbl->lock);
2204	nlmsg_cancel(skb, nlh);
2205	return -EMSGSIZE;
2206}
2207
2208static int neightbl_fill_param_info(struct sk_buff *skb,
2209				    struct neigh_table *tbl,
2210				    struct neigh_parms *parms,
2211				    u32 pid, u32 seq, int type,
2212				    unsigned int flags)
2213{
2214	struct ndtmsg *ndtmsg;
2215	struct nlmsghdr *nlh;
2216
2217	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2218	if (nlh == NULL)
2219		return -EMSGSIZE;
2220
2221	ndtmsg = nlmsg_data(nlh);
2222
2223	read_lock_bh(&tbl->lock);
2224	ndtmsg->ndtm_family = tbl->family;
2225	ndtmsg->ndtm_pad1   = 0;
2226	ndtmsg->ndtm_pad2   = 0;
2227
2228	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2229	    neightbl_fill_parms(skb, parms) < 0)
2230		goto errout;
2231
2232	read_unlock_bh(&tbl->lock);
2233	nlmsg_end(skb, nlh);
2234	return 0;
2235errout:
2236	read_unlock_bh(&tbl->lock);
2237	nlmsg_cancel(skb, nlh);
2238	return -EMSGSIZE;
2239}
2240
2241static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2242	[NDTA_NAME]		= { .type = NLA_STRING },
2243	[NDTA_THRESH1]		= { .type = NLA_U32 },
2244	[NDTA_THRESH2]		= { .type = NLA_U32 },
2245	[NDTA_THRESH3]		= { .type = NLA_U32 },
2246	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2247	[NDTA_PARMS]		= { .type = NLA_NESTED },
2248};
2249
2250static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2251	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2252	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2253	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2254	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2255	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2256	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2257	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2258	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2259	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2260	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2261	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2262	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2263	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2264	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2265	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2266};
2267
2268static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2269			struct netlink_ext_ack *extack)
2270{
2271	struct net *net = sock_net(skb->sk);
2272	struct neigh_table *tbl;
2273	struct ndtmsg *ndtmsg;
2274	struct nlattr *tb[NDTA_MAX+1];
2275	bool found = false;
2276	int err, tidx;
2277
2278	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2279				     nl_neightbl_policy, extack);
2280	if (err < 0)
2281		goto errout;
2282
2283	if (tb[NDTA_NAME] == NULL) {
2284		err = -EINVAL;
2285		goto errout;
2286	}
2287
2288	ndtmsg = nlmsg_data(nlh);
2289
2290	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2291		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
2292		if (!tbl)
2293			continue;
2294		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2295			continue;
2296		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2297			found = true;
2298			break;
2299		}
2300	}
2301
2302	if (!found)
2303		return -ENOENT;
2304
2305	/*
2306	 * We acquire tbl->lock to be nice to the periodic timers and
2307	 * make sure they always see a consistent set of values.
2308	 */
2309	write_lock_bh(&tbl->lock);
2310
2311	if (tb[NDTA_PARMS]) {
2312		struct nlattr *tbp[NDTPA_MAX+1];
2313		struct neigh_parms *p;
2314		int i, ifindex = 0;
2315
2316		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2317						  tb[NDTA_PARMS],
2318						  nl_ntbl_parm_policy, extack);
2319		if (err < 0)
2320			goto errout_tbl_lock;
2321
2322		if (tbp[NDTPA_IFINDEX])
2323			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2324
2325		p = lookup_neigh_parms(tbl, net, ifindex);
2326		if (p == NULL) {
2327			err = -ENOENT;
2328			goto errout_tbl_lock;
2329		}
2330
2331		for (i = 1; i <= NDTPA_MAX; i++) {
2332			if (tbp[i] == NULL)
2333				continue;
2334
2335			switch (i) {
2336			case NDTPA_QUEUE_LEN:
2337				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2338					      nla_get_u32(tbp[i]) *
2339					      SKB_TRUESIZE(ETH_FRAME_LEN));
2340				break;
2341			case NDTPA_QUEUE_LENBYTES:
2342				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2343					      nla_get_u32(tbp[i]));
2344				break;
2345			case NDTPA_PROXY_QLEN:
2346				NEIGH_VAR_SET(p, PROXY_QLEN,
2347					      nla_get_u32(tbp[i]));
2348				break;
2349			case NDTPA_APP_PROBES:
2350				NEIGH_VAR_SET(p, APP_PROBES,
2351					      nla_get_u32(tbp[i]));
2352				break;
2353			case NDTPA_UCAST_PROBES:
2354				NEIGH_VAR_SET(p, UCAST_PROBES,
2355					      nla_get_u32(tbp[i]));
2356				break;
2357			case NDTPA_MCAST_PROBES:
2358				NEIGH_VAR_SET(p, MCAST_PROBES,
2359					      nla_get_u32(tbp[i]));
2360				break;
2361			case NDTPA_MCAST_REPROBES:
2362				NEIGH_VAR_SET(p, MCAST_REPROBES,
2363					      nla_get_u32(tbp[i]));
2364				break;
2365			case NDTPA_BASE_REACHABLE_TIME:
2366				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2367					      nla_get_msecs(tbp[i]));
2368				/* update reachable_time as well; otherwise the change would
2369				 * only take effect the next time neigh_periodic_work
2370				 * decides to recompute it (which can be several minutes away)
2371				 */
2372				p->reachable_time =
2373					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2374				break;
2375			case NDTPA_GC_STALETIME:
2376				NEIGH_VAR_SET(p, GC_STALETIME,
2377					      nla_get_msecs(tbp[i]));
2378				break;
2379			case NDTPA_DELAY_PROBE_TIME:
2380				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2381					      nla_get_msecs(tbp[i]));
2382				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2383				break;
2384			case NDTPA_INTERVAL_PROBE_TIME_MS:
2385				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2386					      nla_get_msecs(tbp[i]));
2387				break;
2388			case NDTPA_RETRANS_TIME:
2389				NEIGH_VAR_SET(p, RETRANS_TIME,
2390					      nla_get_msecs(tbp[i]));
2391				break;
2392			case NDTPA_ANYCAST_DELAY:
2393				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2394					      nla_get_msecs(tbp[i]));
2395				break;
2396			case NDTPA_PROXY_DELAY:
2397				NEIGH_VAR_SET(p, PROXY_DELAY,
2398					      nla_get_msecs(tbp[i]));
2399				break;
2400			case NDTPA_LOCKTIME:
2401				NEIGH_VAR_SET(p, LOCKTIME,
2402					      nla_get_msecs(tbp[i]));
2403				break;
2404			}
2405		}
2406	}
2407
2408	err = -ENOENT;
2409	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2410	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2411	    !net_eq(net, &init_net))
2412		goto errout_tbl_lock;
2413
2414	if (tb[NDTA_THRESH1])
2415		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2416
2417	if (tb[NDTA_THRESH2])
2418		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2419
2420	if (tb[NDTA_THRESH3])
2421		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2422
2423	if (tb[NDTA_GC_INTERVAL])
2424		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2425
2426	err = 0;
2427
2428errout_tbl_lock:
2429	write_unlock_bh(&tbl->lock);
2430errout:
2431	return err;
2432}
2433
2434static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2435				    struct netlink_ext_ack *extack)
2436{
2437	struct ndtmsg *ndtm;
2438
2439	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2440		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2441		return -EINVAL;
2442	}
2443
2444	ndtm = nlmsg_data(nlh);
2445	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2446		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2447		return -EINVAL;
2448	}
2449
2450	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2451		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2452		return -EINVAL;
2453	}
2454
2455	return 0;
2456}
2457
2458static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2459{
2460	const struct nlmsghdr *nlh = cb->nlh;
2461	struct net *net = sock_net(skb->sk);
2462	int family, tidx, nidx = 0;
2463	int tbl_skip = cb->args[0];
2464	int neigh_skip = cb->args[1];
2465	struct neigh_table *tbl;
2466
2467	if (cb->strict_check) {
2468		int err = neightbl_valid_dump_info(nlh, cb->extack);
2469
2470		if (err < 0)
2471			return err;
2472	}
2473
2474	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2475
2476	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2477		struct neigh_parms *p;
2478
2479		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
2480		if (!tbl)
2481			continue;
2482
2483		if (tidx < tbl_skip || (family && tbl->family != family))
2484			continue;
2485
2486		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2487				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2488				       NLM_F_MULTI) < 0)
2489			break;
2490
2491		nidx = 0;
2492		p = list_next_entry(&tbl->parms, list);
2493		list_for_each_entry_from(p, &tbl->parms_list, list) {
2494			if (!net_eq(neigh_parms_net(p), net))
2495				continue;
2496
2497			if (nidx < neigh_skip)
2498				goto next;
2499
2500			if (neightbl_fill_param_info(skb, tbl, p,
2501						     NETLINK_CB(cb->skb).portid,
2502						     nlh->nlmsg_seq,
2503						     RTM_NEWNEIGHTBL,
2504						     NLM_F_MULTI) < 0)
2505				goto out;
2506		next:
2507			nidx++;
2508		}
2509
2510		neigh_skip = 0;
2511	}
2512out:
2513	cb->args[0] = tidx;
2514	cb->args[1] = nidx;
2515
2516	return skb->len;
2517}
2518
2519static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2520			   u32 pid, u32 seq, int type, unsigned int flags)
2521{
2522	u32 neigh_flags, neigh_flags_ext;
2523	unsigned long now = jiffies;
2524	struct nda_cacheinfo ci;
2525	struct nlmsghdr *nlh;
2526	struct ndmsg *ndm;
2527
2528	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2529	if (nlh == NULL)
2530		return -EMSGSIZE;
2531
2532	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2533	neigh_flags     = neigh->flags & NTF_OLD_MASK;
2534
2535	ndm = nlmsg_data(nlh);
2536	ndm->ndm_family	 = neigh->ops->family;
2537	ndm->ndm_pad1    = 0;
2538	ndm->ndm_pad2    = 0;
2539	ndm->ndm_flags	 = neigh_flags;
2540	ndm->ndm_type	 = neigh->type;
2541	ndm->ndm_ifindex = neigh->dev->ifindex;
2542
2543	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2544		goto nla_put_failure;
2545
2546	read_lock_bh(&neigh->lock);
2547	ndm->ndm_state	 = neigh->nud_state;
2548	if (neigh->nud_state & NUD_VALID) {
2549		char haddr[MAX_ADDR_LEN];
2550
2551		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2552		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2553			read_unlock_bh(&neigh->lock);
2554			goto nla_put_failure;
2555		}
2556	}
2557
2558	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2559	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2560	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2561	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2562	read_unlock_bh(&neigh->lock);
2563
2564	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2565	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2566		goto nla_put_failure;
2567
2568	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2569		goto nla_put_failure;
2570	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2571		goto nla_put_failure;
2572
2573	nlmsg_end(skb, nlh);
2574	return 0;
2575
2576nla_put_failure:
2577	nlmsg_cancel(skb, nlh);
2578	return -EMSGSIZE;
2579}
2580
2581static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2582			    u32 pid, u32 seq, int type, unsigned int flags,
2583			    struct neigh_table *tbl)
2584{
2585	u32 neigh_flags, neigh_flags_ext;
2586	struct nlmsghdr *nlh;
2587	struct ndmsg *ndm;
2588
2589	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2590	if (nlh == NULL)
2591		return -EMSGSIZE;
2592
2593	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2594	neigh_flags     = pn->flags & NTF_OLD_MASK;
2595
2596	ndm = nlmsg_data(nlh);
2597	ndm->ndm_family	 = tbl->family;
2598	ndm->ndm_pad1    = 0;
2599	ndm->ndm_pad2    = 0;
2600	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2601	ndm->ndm_type	 = RTN_UNICAST;
2602	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2603	ndm->ndm_state	 = NUD_NONE;
2604
2605	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2606		goto nla_put_failure;
2607
2608	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2609		goto nla_put_failure;
2610	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2611		goto nla_put_failure;
2612
2613	nlmsg_end(skb, nlh);
2614	return 0;
2615
2616nla_put_failure:
2617	nlmsg_cancel(skb, nlh);
2618	return -EMSGSIZE;
2619}
2620
2621static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2622{
2623	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2624	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2625}
2626
2627static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2628{
2629	struct net_device *master;
2630
2631	if (!master_idx)
2632		return false;
2633
2634	master = dev ? netdev_master_upper_dev_get_rcu(dev) : NULL;
2635
2636	/* 0 already denotes that NDA_MASTER wasn't passed, so another invalid
2637	 * ifindex value (-1) is used to denote "no master".
2638	 */
2639	if (master_idx == -1)
2640		return !!master;
2641
2642	if (!master || master->ifindex != master_idx)
2643		return true;
2644
2645	return false;
2646}
2647
2648static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2649{
2650	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2651		return true;
2652
2653	return false;
2654}
2655
2656struct neigh_dump_filter {
2657	int master_idx;
2658	int dev_idx;
2659};
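/*
 * Illustrative note (not part of the original file): these two filters
 * back dump requests such as "ip neigh show dev eth0" (NDA_IFINDEX) and
 * "ip neigh show master br0" (NDA_MASTER); the device names are
 * placeholders.  Dumps that use either filter are marked with
 * NLM_F_DUMP_FILTERED in the functions below.
 */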
2660
2661static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2662			    struct netlink_callback *cb,
2663			    struct neigh_dump_filter *filter)
2664{
2665	struct net *net = sock_net(skb->sk);
2666	struct neighbour *n;
2667	int err = 0, h, s_h = cb->args[1];
2668	int idx, s_idx = idx = cb->args[2];
2669	struct neigh_hash_table *nht;
 
2670	unsigned int flags = NLM_F_MULTI;
2671
2672	if (filter->dev_idx || filter->master_idx)
2673		flags |= NLM_F_DUMP_FILTERED;
2674
2675	nht = rcu_dereference(tbl->nht);
 
2676
2677	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2678		if (h > s_h)
2679			s_idx = 0;
2680		idx = 0;
2681		neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[h]) {
2682			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2683				goto next;
2684			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2685			    neigh_master_filtered(n->dev, filter->master_idx))
2686				goto next;
2687			err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2688					      cb->nlh->nlmsg_seq,
2689					      RTM_NEWNEIGH, flags);
2690			if (err < 0)
2691				goto out;
 
2692next:
2693			idx++;
2694		}
2695	}
 
2696out:
 
2697	cb->args[1] = h;
2698	cb->args[2] = idx;
2699	return err;
2700}
2701
2702static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2703			     struct netlink_callback *cb,
2704			     struct neigh_dump_filter *filter)
2705{
2706	struct pneigh_entry *n;
2707	struct net *net = sock_net(skb->sk);
2708	int err = 0, h, s_h = cb->args[3];
2709	int idx, s_idx = idx = cb->args[4];
2710	unsigned int flags = NLM_F_MULTI;
2711
2712	if (filter->dev_idx || filter->master_idx)
2713		flags |= NLM_F_DUMP_FILTERED;
2714
2715	read_lock_bh(&tbl->lock);
2716
2717	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2718		if (h > s_h)
2719			s_idx = 0;
2720		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2721			if (idx < s_idx || pneigh_net(n) != net)
2722				goto next;
2723			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2724			    neigh_master_filtered(n->dev, filter->master_idx))
2725				goto next;
2726			err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2727					       cb->nlh->nlmsg_seq,
2728					       RTM_NEWNEIGH, flags, tbl);
2729			if (err < 0) {
2730				read_unlock_bh(&tbl->lock);
 
2731				goto out;
2732			}
2733		next:
2734			idx++;
2735		}
2736	}
2737
2738	read_unlock_bh(&tbl->lock);
 
2739out:
2740	cb->args[3] = h;
2741	cb->args[4] = idx;
2742	return err;
2743}
2744
2745static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2746				bool strict_check,
2747				struct neigh_dump_filter *filter,
2748				struct netlink_ext_ack *extack)
2749{
2750	struct nlattr *tb[NDA_MAX + 1];
2751	int err, i;
2752
2753	if (strict_check) {
2754		struct ndmsg *ndm;
2755
2756		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2757			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2758			return -EINVAL;
2759		}
2760
2761		ndm = nlmsg_data(nlh);
2762		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2763		    ndm->ndm_state || ndm->ndm_type) {
2764			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2765			return -EINVAL;
2766		}
2767
2768		if (ndm->ndm_flags & ~NTF_PROXY) {
2769			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2770			return -EINVAL;
2771		}
2772
2773		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2774						    tb, NDA_MAX, nda_policy,
2775						    extack);
2776	} else {
2777		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2778					     NDA_MAX, nda_policy, extack);
2779	}
2780	if (err < 0)
2781		return err;
2782
2783	for (i = 0; i <= NDA_MAX; ++i) {
2784		if (!tb[i])
2785			continue;
2786
2787		/* all new attributes should require strict_check */
2788		switch (i) {
2789		case NDA_IFINDEX:
2790			filter->dev_idx = nla_get_u32(tb[i]);
2791			break;
2792		case NDA_MASTER:
2793			filter->master_idx = nla_get_u32(tb[i]);
2794			break;
2795		default:
2796			if (strict_check) {
2797				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2798				return -EINVAL;
2799			}
2800		}
2801	}
2802
2803	return 0;
2804}
2805
2806static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2807{
2808	const struct nlmsghdr *nlh = cb->nlh;
2809	struct neigh_dump_filter filter = {};
2810	struct neigh_table *tbl;
2811	int t, family, s_t;
2812	int proxy = 0;
2813	int err;
2814
2815	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2816
2817	/* check for the presence of a full ndmsg structure; the family
2818	 * member sits at the same offset in both structures
2819	 */
2820	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2821	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2822		proxy = 1;
2823
2824	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2825	if (err < 0 && cb->strict_check)
2826		return err;
2827	err = 0;
2828
2829	s_t = cb->args[0];
2830
2831	rcu_read_lock();
2832	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2833		tbl = rcu_dereference(neigh_tables[t]);
2834
2835		if (!tbl)
2836			continue;
2837		if (t < s_t || (family && tbl->family != family))
2838			continue;
2839		if (t > s_t)
2840			memset(&cb->args[1], 0, sizeof(cb->args) -
2841						sizeof(cb->args[0]));
2842		if (proxy)
2843			err = pneigh_dump_table(tbl, skb, cb, &filter);
2844		else
2845			err = neigh_dump_table(tbl, skb, cb, &filter);
2846		if (err < 0)
2847			break;
2848	}
2849	rcu_read_unlock();
2850
2851	cb->args[0] = t;
2852	return err;
2853}
2854
2855static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2856			       struct neigh_table **tbl,
2857			       void **dst, int *dev_idx, u8 *ndm_flags,
2858			       struct netlink_ext_ack *extack)
2859{
2860	struct nlattr *tb[NDA_MAX + 1];
2861	struct ndmsg *ndm;
2862	int err, i;
2863
2864	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2865		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2866		return -EINVAL;
2867	}
2868
2869	ndm = nlmsg_data(nlh);
2870	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2871	    ndm->ndm_type) {
2872		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2873		return -EINVAL;
2874	}
2875
2876	if (ndm->ndm_flags & ~NTF_PROXY) {
2877		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2878		return -EINVAL;
2879	}
2880
2881	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2882					    NDA_MAX, nda_policy, extack);
2883	if (err < 0)
2884		return err;
2885
2886	*ndm_flags = ndm->ndm_flags;
2887	*dev_idx = ndm->ndm_ifindex;
2888	*tbl = neigh_find_table(ndm->ndm_family);
2889	if (*tbl == NULL) {
2890		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2891		return -EAFNOSUPPORT;
2892	}
2893
2894	for (i = 0; i <= NDA_MAX; ++i) {
2895		if (!tb[i])
2896			continue;
2897
2898		switch (i) {
2899		case NDA_DST:
2900			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2901				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2902				return -EINVAL;
2903			}
2904			*dst = nla_data(tb[i]);
2905			break;
2906		default:
2907			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2908			return -EINVAL;
2909		}
2910	}
2911
2912	return 0;
2913}
2914
2915static inline size_t neigh_nlmsg_size(void)
2916{
2917	return NLMSG_ALIGN(sizeof(struct ndmsg))
2918	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2919	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2920	       + nla_total_size(sizeof(struct nda_cacheinfo))
2921	       + nla_total_size(4)  /* NDA_PROBES */
2922	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2923	       + nla_total_size(1); /* NDA_PROTOCOL */
2924}
2925
2926static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2927			   u32 pid, u32 seq)
2928{
2929	struct sk_buff *skb;
2930	int err = 0;
2931
2932	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2933	if (!skb)
2934		return -ENOBUFS;
2935
2936	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2937	if (err) {
2938		kfree_skb(skb);
2939		goto errout;
2940	}
2941
2942	err = rtnl_unicast(skb, net, pid);
2943errout:
2944	return err;
2945}
2946
2947static inline size_t pneigh_nlmsg_size(void)
2948{
2949	return NLMSG_ALIGN(sizeof(struct ndmsg))
2950	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2951	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2952	       + nla_total_size(1); /* NDA_PROTOCOL */
2953}
2954
2955static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2956			    u32 pid, u32 seq, struct neigh_table *tbl)
2957{
2958	struct sk_buff *skb;
2959	int err = 0;
2960
2961	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2962	if (!skb)
2963		return -ENOBUFS;
2964
2965	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2966	if (err) {
2967		kfree_skb(skb);
2968		goto errout;
2969	}
2970
2971	err = rtnl_unicast(skb, net, pid);
2972errout:
2973	return err;
2974}
2975
2976static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2977		     struct netlink_ext_ack *extack)
2978{
2979	struct net *net = sock_net(in_skb->sk);
2980	struct net_device *dev = NULL;
2981	struct neigh_table *tbl = NULL;
2982	struct neighbour *neigh;
2983	void *dst = NULL;
2984	u8 ndm_flags = 0;
2985	int dev_idx = 0;
2986	int err;
2987
2988	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2989				  extack);
2990	if (err < 0)
2991		return err;
2992
2993	if (dev_idx) {
2994		dev = __dev_get_by_index(net, dev_idx);
2995		if (!dev) {
2996			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2997			return -ENODEV;
2998		}
2999	}
3000
3001	if (!dst) {
3002		NL_SET_ERR_MSG(extack, "Network address not specified");
3003		return -EINVAL;
3004	}
3005
3006	if (ndm_flags & NTF_PROXY) {
3007		struct pneigh_entry *pn;
3008
3009		pn = pneigh_lookup(tbl, net, dst, dev, 0);
3010		if (!pn) {
3011			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3012			return -ENOENT;
3013		}
3014		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3015					nlh->nlmsg_seq, tbl);
3016	}
3017
3018	if (!dev) {
3019		NL_SET_ERR_MSG(extack, "No device specified");
3020		return -EINVAL;
3021	}
3022
3023	neigh = neigh_lookup(tbl, dst, dev);
3024	if (!neigh) {
3025		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3026		return -ENOENT;
3027	}
3028
3029	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3030			      nlh->nlmsg_seq);
3031
3032	neigh_release(neigh);
3033
3034	return err;
3035}
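/*
 * Illustrative note (not part of the original file): this is the doit
 * handler behind lookups such as "ip neigh get 192.0.2.1 dev eth0"
 * (address and device are placeholders).  Setting NTF_PROXY in the
 * request selects the proxy table via pneigh_lookup() instead of the
 * regular neigh_lookup() path above.
 */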
3036
3037void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3038{
3039	int chain;
3040	struct neigh_hash_table *nht;
3041
3042	rcu_read_lock();
3043	nht = rcu_dereference(tbl->nht);
3044
3045	read_lock_bh(&tbl->lock); /* avoid resizes */
3046	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3047		struct neighbour *n;
3048
3049		neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
3050			cb(n, cookie);
3051	}
3052	read_unlock_bh(&tbl->lock);
3053	rcu_read_unlock();
3054}
3055EXPORT_SYMBOL(neigh_for_each);
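/*
 * Usage sketch (illustrative only; "arp_tbl" is assumed to be a visible
 * neigh_table): count the resolved entries of a table.  The callback
 * runs with tbl->lock held, so it must not sleep or call back into the
 * neighbour code.
 *
 *	static void count_valid(struct neighbour *n, void *cookie)
 *	{
 *		int *count = cookie;
 *
 *		if (READ_ONCE(n->nud_state) & NUD_VALID)
 *			(*count)++;
 *	}
 *
 *	int valid = 0;
 *	neigh_for_each(&arp_tbl, count_valid, &valid);
 */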
3056
3057/* The tbl->lock must be held as a writer and BH disabled. */
3058void __neigh_for_each_release(struct neigh_table *tbl,
3059			      int (*cb)(struct neighbour *))
3060{
 
3061	struct neigh_hash_table *nht;
3062	int chain;
3063
3064	nht = rcu_dereference_protected(tbl->nht,
3065					lockdep_is_held(&tbl->lock));
3066	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3067		struct hlist_node *tmp;
3068		struct neighbour *n;
 
3069
3070		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[chain]) {
3071			int release;
3072
3073			write_lock(&n->lock);
3074			release = cb(n);
3075			if (release) {
3076				hlist_del_rcu(&n->hash);
3077				hlist_del_rcu(&n->dev_list);
3078				neigh_mark_dead(n);
3079			}
3080			write_unlock(&n->lock);
3081			if (release)
3082				neigh_cleanup_and_release(n);
3083		}
3084	}
3085}
3086EXPORT_SYMBOL(__neigh_for_each_release);
3087
3088int neigh_xmit(int index, struct net_device *dev,
3089	       const void *addr, struct sk_buff *skb)
3090{
3091	int err = -EAFNOSUPPORT;
3092
3093	if (likely(index < NEIGH_NR_TABLES)) {
3094		struct neigh_table *tbl;
3095		struct neighbour *neigh;
3096
3097		rcu_read_lock();
3098		tbl = rcu_dereference(neigh_tables[index]);
3099		if (!tbl)
3100			goto out_unlock;
3101		if (index == NEIGH_ARP_TABLE) {
3102			u32 key = *((u32 *)addr);
3103
3104			neigh = __ipv4_neigh_lookup_noref(dev, key);
3105		} else {
3106			neigh = __neigh_lookup_noref(tbl, addr, dev);
3107		}
3108		if (!neigh)
3109			neigh = __neigh_create(tbl, addr, dev, false);
3110		err = PTR_ERR(neigh);
3111		if (IS_ERR(neigh)) {
3112			rcu_read_unlock();
3113			goto out_kfree_skb;
3114		}
3115		err = READ_ONCE(neigh->output)(neigh, skb);
3116out_unlock:
3117		rcu_read_unlock();
3118	} else if (index == NEIGH_LINK_TABLE) {
3120		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3121				      addr, NULL, skb->len);
3122		if (err < 0)
3123			goto out_kfree_skb;
3124		err = dev_queue_xmit(skb);
3125	}
3126out:
3127	return err;
3128out_kfree_skb:
3129	kfree_skb(skb);
3130	goto out;
3131}
3132EXPORT_SYMBOL(neigh_xmit);
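/*
 * Usage sketch (illustrative, not part of the original file): callers
 * such as lightweight tunnels hand a packet to the neighbour layer with
 * an L3 next-hop address instead of building the link-layer header
 * themselves:
 *
 *	__be32 gw = ...;	// next-hop IPv4 address, placeholder
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &gw, skb);
 *
 * With NEIGH_LINK_TABLE the address is instead used directly as the
 * destination hardware address via dev_hard_header(), as above.
 */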
3133
3134#ifdef CONFIG_PROC_FS
3135
3136static struct neighbour *neigh_get_valid(struct seq_file *seq,
3137					 struct neighbour *n,
3138					 loff_t *pos)
3139{
3140	struct neigh_seq_state *state = seq->private;
3141	struct net *net = seq_file_net(seq);
3142
3143	if (!net_eq(dev_net(n->dev), net))
3144		return NULL;
3145
3146	if (state->neigh_sub_iter) {
3147		loff_t fakep = 0;
3148		void *v;
3149
3150		v = state->neigh_sub_iter(state, n, pos ? pos : &fakep);
3151		if (!v)
3152			return NULL;
3153		if (pos)
3154			return v;
3155	}
3156
3157	if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3158		return n;
3159
3160	if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3161		return n;
3162
3163	return NULL;
3164}
3165
3166static struct neighbour *neigh_get_first(struct seq_file *seq)
3167{
3168	struct neigh_seq_state *state = seq->private;
3169	struct neigh_hash_table *nht = state->nht;
3170	struct neighbour *n, *tmp;
 
3171
3172	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3173
3174	while (++state->bucket < (1 << nht->hash_shift)) {
3175		neigh_for_each_in_bucket(n, &nht->hash_heads[state->bucket]) {
3176			tmp = neigh_get_valid(seq, n, NULL);
3177			if (tmp)
3178				return tmp;
3179		}
3180	}
 
3181
3182	return NULL;
3183}
3184
3185static struct neighbour *neigh_get_next(struct seq_file *seq,
3186					struct neighbour *n,
3187					loff_t *pos)
3188{
3189	struct neigh_seq_state *state = seq->private;
3190	struct neighbour *tmp;
 
3191
3192	if (state->neigh_sub_iter) {
3193		void *v = state->neigh_sub_iter(state, n, pos);
3194
3195		if (v)
3196			return n;
3197	}
3198
3199	hlist_for_each_entry_continue(n, hash) {
3200		tmp = neigh_get_valid(seq, n, pos);
3201		if (tmp) {
3202			n = tmp;
3203			goto out;
3204		}
3205	}
3206
3207	n = neigh_get_first(seq);
3208out:
3209	if (n && pos)
3210		--(*pos);
3211
3212	return n;
3213}
3214
3215static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3216{
3217	struct neighbour *n = neigh_get_first(seq);
3218
3219	if (n) {
3220		--(*pos);
3221		while (*pos) {
3222			n = neigh_get_next(seq, n, pos);
3223			if (!n)
3224				break;
3225		}
3226	}
3227	return *pos ? NULL : n;
3228}
3229
3230static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3231{
3232	struct neigh_seq_state *state = seq->private;
3233	struct net *net = seq_file_net(seq);
3234	struct neigh_table *tbl = state->tbl;
3235	struct pneigh_entry *pn = NULL;
3236	int bucket;
3237
3238	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3239	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3240		pn = tbl->phash_buckets[bucket];
3241		while (pn && !net_eq(pneigh_net(pn), net))
3242			pn = pn->next;
3243		if (pn)
3244			break;
3245	}
3246	state->bucket = bucket;
3247
3248	return pn;
3249}
3250
3251static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3252					    struct pneigh_entry *pn,
3253					    loff_t *pos)
3254{
3255	struct neigh_seq_state *state = seq->private;
3256	struct net *net = seq_file_net(seq);
3257	struct neigh_table *tbl = state->tbl;
3258
3259	do {
3260		pn = pn->next;
3261	} while (pn && !net_eq(pneigh_net(pn), net));
3262
3263	while (!pn) {
3264		if (++state->bucket > PNEIGH_HASHMASK)
3265			break;
3266		pn = tbl->phash_buckets[state->bucket];
3267		while (pn && !net_eq(pneigh_net(pn), net))
3268			pn = pn->next;
3269		if (pn)
3270			break;
3271	}
3272
3273	if (pn && pos)
3274		--(*pos);
3275
3276	return pn;
3277}
3278
3279static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3280{
3281	struct pneigh_entry *pn = pneigh_get_first(seq);
3282
3283	if (pn) {
3284		--(*pos);
3285		while (*pos) {
3286			pn = pneigh_get_next(seq, pn, pos);
3287			if (!pn)
3288				break;
3289		}
3290	}
3291	return *pos ? NULL : pn;
3292}
3293
3294static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3295{
3296	struct neigh_seq_state *state = seq->private;
3297	void *rc;
3298	loff_t idxpos = *pos;
3299
3300	rc = neigh_get_idx(seq, &idxpos);
3301	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3302		rc = pneigh_get_idx(seq, &idxpos);
3303
3304	return rc;
3305}
3306
3307void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3308	__acquires(tbl->lock)
3309	__acquires(rcu)
3310{
3311	struct neigh_seq_state *state = seq->private;
3312
3313	state->tbl = tbl;
3314	state->bucket = -1;
3315	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3316
3317	rcu_read_lock();
3318	state->nht = rcu_dereference(tbl->nht);
3319	read_lock_bh(&tbl->lock);
3320
3321	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3322}
3323EXPORT_SYMBOL(neigh_seq_start);
3324
3325void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3326{
3327	struct neigh_seq_state *state;
3328	void *rc;
3329
3330	if (v == SEQ_START_TOKEN) {
3331		rc = neigh_get_first(seq);
3332		goto out;
3333	}
3334
3335	state = seq->private;
3336	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3337		rc = neigh_get_next(seq, v, NULL);
3338		if (rc)
3339			goto out;
3340		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3341			rc = pneigh_get_first(seq);
3342	} else {
3343		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3344		rc = pneigh_get_next(seq, v, NULL);
3345	}
3346out:
3347	++(*pos);
3348	return rc;
3349}
3350EXPORT_SYMBOL(neigh_seq_next);
3351
3352void neigh_seq_stop(struct seq_file *seq, void *v)
3353	__releases(tbl->lock)
3354	__releases(rcu)
3355{
3356	struct neigh_seq_state *state = seq->private;
3357	struct neigh_table *tbl = state->tbl;
3358
3359	read_unlock_bh(&tbl->lock);
3360	rcu_read_unlock();
3361}
3362EXPORT_SYMBOL(neigh_seq_stop);
3363
3364/* statistics via seq_file */
3365
3366static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3367{
3368	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3369	int cpu;
3370
3371	if (*pos == 0)
3372		return SEQ_START_TOKEN;
3373
3374	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3375		if (!cpu_possible(cpu))
3376			continue;
3377		*pos = cpu+1;
3378		return per_cpu_ptr(tbl->stats, cpu);
3379	}
3380	return NULL;
3381}
3382
3383static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3384{
3385	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3386	int cpu;
3387
3388	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3389		if (!cpu_possible(cpu))
3390			continue;
3391		*pos = cpu+1;
3392		return per_cpu_ptr(tbl->stats, cpu);
3393	}
3394	(*pos)++;
3395	return NULL;
3396}
3397
3398static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3399{
3400
3401}
3402
3403static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3404{
3405	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3406	struct neigh_statistics *st = v;
3407
3408	if (v == SEQ_START_TOKEN) {
3409		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3410		return 0;
3411	}
3412
3413	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3414			"%08lx         %08lx         %08lx         "
3415			"%08lx       %08lx            %08lx\n",
3416		   atomic_read(&tbl->entries),
3417
3418		   st->allocs,
3419		   st->destroys,
3420		   st->hash_grows,
3421
3422		   st->lookups,
3423		   st->hits,
3424
3425		   st->res_failed,
3426
3427		   st->rcv_probes_mcast,
3428		   st->rcv_probes_ucast,
3429
3430		   st->periodic_gc_runs,
3431		   st->forced_gc_runs,
3432		   st->unres_discards,
3433		   st->table_fulls
3434		   );
3435
3436	return 0;
3437}
3438
3439static const struct seq_operations neigh_stat_seq_ops = {
3440	.start	= neigh_stat_seq_start,
3441	.next	= neigh_stat_seq_next,
3442	.stop	= neigh_stat_seq_stop,
3443	.show	= neigh_stat_seq_show,
3444};
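/*
 * Example output (all values made up): each line of a per-table stats
 * file such as /proc/net/stat/arp_cache shows one possible CPU's
 * counters in hex, e.g.
 *
 *	entries  allocs   destroys hash_grows lookups  hits     ...
 *	0000001c 0000004a 0000002e 00000001   000001d2 000001a0 ...
 *
 * The first column (entries) is the table-wide total and is repeated on
 * every CPU's line.
 */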
3445#endif /* CONFIG_PROC_FS */
3446
3447static void __neigh_notify(struct neighbour *n, int type, int flags,
3448			   u32 pid)
3449{
 
3450	struct sk_buff *skb;
3451	int err = -ENOBUFS;
3452	struct net *net;
3453
3454	rcu_read_lock();
3455	net = dev_net_rcu(n->dev);
3456	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3457	if (skb == NULL)
3458		goto errout;
3459
3460	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3461	if (err < 0) {
3462		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3463		WARN_ON(err == -EMSGSIZE);
3464		kfree_skb(skb);
3465		goto errout;
3466	}
3467	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3468	goto out;
3469errout:
3470	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3471out:
3472	rcu_read_unlock();
3473}
3474
3475void neigh_app_ns(struct neighbour *n)
3476{
3477	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3478}
3479EXPORT_SYMBOL(neigh_app_ns);
3480
3481#ifdef CONFIG_SYSCTL
3482static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3483
3484static int proc_unres_qlen(const struct ctl_table *ctl, int write,
3485			   void *buffer, size_t *lenp, loff_t *ppos)
3486{
3487	int size, ret;
3488	struct ctl_table tmp = *ctl;
3489
3490	tmp.extra1 = SYSCTL_ZERO;
3491	tmp.extra2 = &unres_qlen_max;
3492	tmp.data = &size;
3493
3494	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3495	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3496
3497	if (write && !ret)
3498		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3499	return ret;
3500}
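/*
 * Worked example (illustrative): unres_qlen is presented to user space
 * in packets but stored as QUEUE_LEN_BYTES.  SKB_TRUESIZE(ETH_FRAME_LEN)
 * is roughly 2 KB on common 64-bit configurations (the exact value is
 * config dependent), so writing "unres_qlen = 101" stores about
 * 101 * 2 KB, and reads divide by the same factor.
 */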
3501
3502static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3503				  int index)
3504{
3505	struct net_device *dev;
3506	int family = neigh_parms_family(p);
3507
3508	rcu_read_lock();
3509	for_each_netdev_rcu(net, dev) {
3510		struct neigh_parms *dst_p =
3511				neigh_get_dev_parms_rcu(dev, family);
3512
3513		if (dst_p && !test_bit(index, dst_p->data_state))
3514			dst_p->data[index] = p->data[index];
3515	}
3516	rcu_read_unlock();
3517}
3518
3519static void neigh_proc_update(const struct ctl_table *ctl, int write)
3520{
3521	struct net_device *dev = ctl->extra1;
3522	struct neigh_parms *p = ctl->extra2;
3523	struct net *net = neigh_parms_net(p);
3524	int index = (int *) ctl->data - p->data;
3525
3526	if (!write)
3527		return;
3528
3529	set_bit(index, p->data_state);
3530	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3531		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3532	if (!dev) /* NULL dev means this is default value */
3533		neigh_copy_dflt_parms(net, p, index);
3534}
3535
3536static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
3537					   void *buffer, size_t *lenp,
3538					   loff_t *ppos)
3539{
3540	struct ctl_table tmp = *ctl;
3541	int ret;
3542
3543	tmp.extra1 = SYSCTL_ZERO;
3544	tmp.extra2 = SYSCTL_INT_MAX;
3545
3546	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3547	neigh_proc_update(ctl, write);
3548	return ret;
3549}
3550
3551static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
3552						   void *buffer, size_t *lenp, loff_t *ppos)
3553{
3554	struct ctl_table tmp = *ctl;
3555	int ret;
3556
3557	int min = msecs_to_jiffies(1);
3558
3559	tmp.extra1 = &min;
3560	tmp.extra2 = NULL;
3561
3562	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3563	neigh_proc_update(ctl, write);
3564	return ret;
3565}
3566
3567int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
3568			size_t *lenp, loff_t *ppos)
3569{
3570	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3571
3572	neigh_proc_update(ctl, write);
3573	return ret;
3574}
3575EXPORT_SYMBOL(neigh_proc_dointvec);
3576
3577int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
 
3578				size_t *lenp, loff_t *ppos)
3579{
3580	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3581
3582	neigh_proc_update(ctl, write);
3583	return ret;
3584}
3585EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3586
3587static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
3588					      void *buffer, size_t *lenp,
3589					      loff_t *ppos)
3590{
3591	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3592
3593	neigh_proc_update(ctl, write);
3594	return ret;
3595}
3596
3597int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
3598				   void *buffer, size_t *lenp, loff_t *ppos)
 
3599{
3600	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3601
3602	neigh_proc_update(ctl, write);
3603	return ret;
3604}
3605EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3606
3607static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
3608					  void *buffer, size_t *lenp,
3609					  loff_t *ppos)
3610{
3611	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3612
3613	neigh_proc_update(ctl, write);
3614	return ret;
3615}
3616
3617static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
3618					  void *buffer, size_t *lenp,
3619					  loff_t *ppos)
3620{
3621	struct neigh_parms *p = ctl->extra2;
3622	int ret;
3623
3624	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3625		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3626	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3627		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3628	else
3629		ret = -1;
3630
3631	if (write && ret == 0) {
3632		/* update reachable_time as well; otherwise the change would
3633		 * only take effect the next time neigh_periodic_work
3634		 * decides to recompute it
3635		 */
3636		p->reachable_time =
3637			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3638	}
3639	return ret;
3640}
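/*
 * Illustrative note: "base_reachable_time" (seconds) and
 * "base_reachable_time_ms" (milliseconds) both land here and update the
 * same NEIGH_VAR slot, so e.g. writing 30000 to the _ms file also
 * re-randomizes reachable_time immediately rather than waiting for
 * neigh_periodic_work.
 */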
3641
3642#define NEIGH_PARMS_DATA_OFFSET(index)	\
3643	(&((struct neigh_parms *) 0)->data[index])
3644
3645#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3646	[NEIGH_VAR_ ## attr] = { \
3647		.procname	= name, \
3648		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3649		.maxlen		= sizeof(int), \
3650		.mode		= mval, \
3651		.proc_handler	= proc, \
3652	}
3653
3654#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3655	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3656
3657#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3658	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3659
3660#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3661	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3662
3663#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3664	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3665
3666#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3667	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3668
3669#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3670	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3671
3672static struct neigh_sysctl_table {
3673	struct ctl_table_header *sysctl_header;
3674	struct ctl_table neigh_vars[NEIGH_VAR_MAX];
3675} neigh_sysctl_template __read_mostly = {
3676	.neigh_vars = {
3677		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3678		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3679		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3680		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3681		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3682		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3683		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3684		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3685						       "interval_probe_time_ms"),
3686		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3687		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3688		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3689		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3690		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3691		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3692		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3693		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3694		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3695		[NEIGH_VAR_GC_INTERVAL] = {
3696			.procname	= "gc_interval",
3697			.maxlen		= sizeof(int),
3698			.mode		= 0644,
3699			.proc_handler	= proc_dointvec_jiffies,
3700		},
3701		[NEIGH_VAR_GC_THRESH1] = {
3702			.procname	= "gc_thresh1",
3703			.maxlen		= sizeof(int),
3704			.mode		= 0644,
3705			.extra1		= SYSCTL_ZERO,
3706			.extra2		= SYSCTL_INT_MAX,
3707			.proc_handler	= proc_dointvec_minmax,
3708		},
3709		[NEIGH_VAR_GC_THRESH2] = {
3710			.procname	= "gc_thresh2",
3711			.maxlen		= sizeof(int),
3712			.mode		= 0644,
3713			.extra1		= SYSCTL_ZERO,
3714			.extra2		= SYSCTL_INT_MAX,
3715			.proc_handler	= proc_dointvec_minmax,
3716		},
3717		[NEIGH_VAR_GC_THRESH3] = {
3718			.procname	= "gc_thresh3",
3719			.maxlen		= sizeof(int),
3720			.mode		= 0644,
3721			.extra1		= SYSCTL_ZERO,
3722			.extra2		= SYSCTL_INT_MAX,
3723			.proc_handler	= proc_dointvec_minmax,
3724		},
 
3725	},
3726};
3727
3728int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3729			  proc_handler *handler)
3730{
3731	int i;
3732	struct neigh_sysctl_table *t;
3733	const char *dev_name_source;
3734	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3735	char *p_name;
3736	size_t neigh_vars_size;
3737
3738	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3739	if (!t)
3740		goto err;
3741
3742	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3743		t->neigh_vars[i].data += (long) p;
3744		t->neigh_vars[i].extra1 = dev;
3745		t->neigh_vars[i].extra2 = p;
3746	}
3747
3748	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
3749	if (dev) {
3750		dev_name_source = dev->name;
3751		/* Terminate the table early */
3752		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
 
3753	} else {
3754		struct neigh_table *tbl = p->tbl;
3755		dev_name_source = "default";
3756		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3757		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3758		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3759		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3760	}
3761
3762	if (handler) {
3763		/* RetransTime */
3764		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3765		/* ReachableTime */
3766		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3767		/* RetransTime (in milliseconds)*/
3768		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3769		/* ReachableTime (in milliseconds) */
3770		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3771	} else {
3772		/* These handlers update p->reachable_time after
3773		 * base_reachable_time(_ms) is set, so that the new timer takes
3774		 * effect after the next neighbour update instead of waiting for
3775		 * neigh_periodic_work to recompute it (which can take several
3776		 * minutes).  Any handler that replaces them should do the same.
3777		 */
3778		/* ReachableTime */
3779		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3780			neigh_proc_base_reachable_time;
3781		/* ReachableTime (in milliseconds) */
3782		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3783			neigh_proc_base_reachable_time;
3784	}
3785
3786	switch (neigh_parms_family(p)) {
3787	case AF_INET:
3788	      p_name = "ipv4";
3789	      break;
3790	case AF_INET6:
3791	      p_name = "ipv6";
3792	      break;
3793	default:
3794	      BUG();
3795	}
3796
3797	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3798		p_name, dev_name_source);
3799	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
3800						  neigh_path, t->neigh_vars,
3801						  neigh_vars_size);
3802	if (!t->sysctl_header)
3803		goto free;
3804
3805	p->sysctl_table = t;
3806	return 0;
3807
3808free:
3809	kfree(t);
3810err:
3811	return -ENOBUFS;
3812}
3813EXPORT_SYMBOL(neigh_sysctl_register);
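/*
 * Illustrative result (device name is a placeholder): for the IPv4 ARP
 * parms of "eth0" the table above is registered as
 * /proc/sys/net/ipv4/neigh/eth0/ and is truncated before the gc_*
 * entries; gc_interval and gc_thresh1..3 only appear under
 * .../neigh/default/, where they point at the table-wide fields.
 */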
3814
3815void neigh_sysctl_unregister(struct neigh_parms *p)
3816{
3817	if (p->sysctl_table) {
3818		struct neigh_sysctl_table *t = p->sysctl_table;
3819		p->sysctl_table = NULL;
3820		unregister_net_sysctl_table(t->sysctl_header);
3821		kfree(t);
3822	}
3823}
3824EXPORT_SYMBOL(neigh_sysctl_unregister);
3825
3826#endif	/* CONFIG_SYSCTL */
3827
3828static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
3829	{.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
3830	{.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
3831	{.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
3832	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
3833	{.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
3834	{.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
3835};
3836
3837static int __init neigh_init(void)
3838{
3839	rtnl_register_many(neigh_rtnl_msg_handlers);
3840	return 0;
3841}
3842
3843subsys_initcall(neigh_init);