   1/*
   2 *	Generic address resolution entity
   3 *
   4 *	Authors:
   5 *	Pedro Roque		<roque@di.fc.ul.pt>
   6 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 *
  13 *	Fixes:
  14 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  15 *	Harald Welte		Add neighbour cache statistics like rtstat
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/socket.h>
  25#include <linux/netdevice.h>
  26#include <linux/proc_fs.h>
  27#ifdef CONFIG_SYSCTL
  28#include <linux/sysctl.h>
  29#endif
  30#include <linux/times.h>
  31#include <net/net_namespace.h>
  32#include <net/neighbour.h>
  33#include <net/dst.h>
  34#include <net/sock.h>
  35#include <net/netevent.h>
  36#include <net/netlink.h>
  37#include <linux/rtnetlink.h>
  38#include <linux/random.h>
  39#include <linux/string.h>
  40#include <linux/log2.h>
  41#include <linux/inetdevice.h>
  42#include <net/addrconf.h>
  43
  44#define DEBUG
  45#define NEIGH_DEBUG 1
  46#define neigh_dbg(level, fmt, ...)		\
  47do {						\
  48	if (level <= NEIGH_DEBUG)		\
  49		pr_debug(fmt, ##__VA_ARGS__);	\
  50} while (0)
  51
  52#define PNEIGH_HASHMASK		0xF
  53
  54static void neigh_timer_handler(unsigned long arg);
  55static void __neigh_notify(struct neighbour *n, int type, int flags);
  56static void neigh_update_notify(struct neighbour *neigh);
  57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
  58
  59#ifdef CONFIG_PROC_FS
  60static const struct file_operations neigh_stat_seq_fops;
  61#endif
  62
  63/*
  64   Neighbour hash table buckets are protected with rwlock tbl->lock.
  65
  66   - All the scans/updates to hash buckets MUST be made under this lock.
   67   - NOTHING clever should be done under this lock: no callbacks
   68     into protocol backends, no attempts to send anything to the network.
   69     That would result in deadlocks if the backend/driver wants to use the
   70     neighbour cache.
  71   - If the entry requires some non-trivial actions, increase
  72     its reference count and release table lock.
  73
  74   Neighbour entries are protected:
  75   - with reference count.
  76   - with rwlock neigh->lock
  77
  78   Reference count prevents destruction.
  79
  80   neigh->lock mainly serializes ll address data and its validity state.
   81   However, the same lock is used to protect other entry fields:
  82    - timer
  83    - resolution queue
  84
   85   Again, nothing clever should be done under neigh->lock;
   86   the most complicated procedure we allow is dev->hard_header.
   87   It is assumed that dev->hard_header is simple and does
   88   not call back into the neighbour tables.
  89 */
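/*
 * Illustrative sketch (not part of the original file): the access pattern
 * the rules above prescribe.  Under tbl->lock only cheap bookkeeping is
 * done; before any non-trivial work (driver callbacks, transmissions) the
 * entry is pinned with a reference and the table lock is dropped.
 */
static void neigh_example_use(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* refcount prevents destruction */
	write_unlock_bh(&tbl->lock);

	/* ... heavy work may safely happen here ... */

	neigh_release(n);
}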
  90
  91static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  92{
  93	kfree_skb(skb);
  94	return -ENETDOWN;
  95}
  96
  97static void neigh_cleanup_and_release(struct neighbour *neigh)
  98{
  99	if (neigh->parms->neigh_cleanup)
 100		neigh->parms->neigh_cleanup(neigh);
 101
 102	__neigh_notify(neigh, RTM_DELNEIGH, 0);
 103	neigh_release(neigh);
 104}
 105
  106/*
  107 * Returns a value uniformly distributed over the interval (1/2)*base...(3/2)*base.
  108 * This corresponds to the default IPv6 settings and is not overridable,
  109 * because it is a really reasonable choice.
  110 */
 111
 112unsigned long neigh_rand_reach_time(unsigned long base)
 113{
 114	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 115}
 116EXPORT_SYMBOL(neigh_rand_reach_time);
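/*
 * Worked example (illustrative, not part of the original file): with
 * base = 30 * HZ the value returned is
 *
 *	(prandom_u32() % (30 * HZ)) + 15 * HZ
 *
 * i.e. uniformly distributed over [15 * HZ, 45 * HZ) -- between one half
 * and three halves of the base reachable time, as described above.
 */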
 117
 118
 119static int neigh_forced_gc(struct neigh_table *tbl)
 120{
 121	int shrunk = 0;
 122	int i;
 123	struct neigh_hash_table *nht;
 124
 125	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 126
 127	write_lock_bh(&tbl->lock);
 128	nht = rcu_dereference_protected(tbl->nht,
 129					lockdep_is_held(&tbl->lock));
 130	for (i = 0; i < (1 << nht->hash_shift); i++) {
 131		struct neighbour *n;
 132		struct neighbour __rcu **np;
 133
 134		np = &nht->hash_buckets[i];
 135		while ((n = rcu_dereference_protected(*np,
 136					lockdep_is_held(&tbl->lock))) != NULL) {
 137			/* Neighbour record may be discarded if:
 138			 * - nobody refers to it.
 139			 * - it is not permanent
 140			 */
 141			write_lock(&n->lock);
 142			if (atomic_read(&n->refcnt) == 1 &&
 143			    !(n->nud_state & NUD_PERMANENT)) {
 144				rcu_assign_pointer(*np,
 145					rcu_dereference_protected(n->next,
 146						  lockdep_is_held(&tbl->lock)));
 147				n->dead = 1;
 148				shrunk	= 1;
 149				write_unlock(&n->lock);
 150				neigh_cleanup_and_release(n);
 151				continue;
 152			}
 153			write_unlock(&n->lock);
 154			np = &n->next;
 155		}
 156	}
 157
 158	tbl->last_flush = jiffies;
 159
 160	write_unlock_bh(&tbl->lock);
 161
 162	return shrunk;
 163}
 164
 165static void neigh_add_timer(struct neighbour *n, unsigned long when)
 166{
 167	neigh_hold(n);
 168	if (unlikely(mod_timer(&n->timer, when))) {
 169		printk("NEIGH: BUG, double timer add, state is %x\n",
 170		       n->nud_state);
 171		dump_stack();
 172	}
 173}
 174
 175static int neigh_del_timer(struct neighbour *n)
 176{
 177	if ((n->nud_state & NUD_IN_TIMER) &&
 178	    del_timer(&n->timer)) {
 179		neigh_release(n);
 180		return 1;
 181	}
 182	return 0;
 183}
 184
 185static void pneigh_queue_purge(struct sk_buff_head *list)
 186{
 187	struct sk_buff *skb;
 188
 189	while ((skb = skb_dequeue(list)) != NULL) {
 190		dev_put(skb->dev);
 191		kfree_skb(skb);
 192	}
 193}
 194
 195static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 196{
 197	int i;
 198	struct neigh_hash_table *nht;
 199
 200	nht = rcu_dereference_protected(tbl->nht,
 201					lockdep_is_held(&tbl->lock));
 202
 203	for (i = 0; i < (1 << nht->hash_shift); i++) {
 204		struct neighbour *n;
 205		struct neighbour __rcu **np = &nht->hash_buckets[i];
 206
 207		while ((n = rcu_dereference_protected(*np,
 208					lockdep_is_held(&tbl->lock))) != NULL) {
 209			if (dev && n->dev != dev) {
 210				np = &n->next;
 211				continue;
 212			}
 213			rcu_assign_pointer(*np,
 214				   rcu_dereference_protected(n->next,
 215						lockdep_is_held(&tbl->lock)));
 216			write_lock(&n->lock);
 217			neigh_del_timer(n);
 218			n->dead = 1;
 219
 220			if (atomic_read(&n->refcnt) != 1) {
  221				/* The most unpleasant situation:
  222				   we must destroy the neighbour entry,
  223				   but someone still holds a reference to it.
  224
  225				   Destruction will be delayed until
  226				   the last user releases it, but
  227				   we must kill timers etc. and move
  228				   it to a safe state.
  229				 */
 230				__skb_queue_purge(&n->arp_queue);
 231				n->arp_queue_len_bytes = 0;
 232				n->output = neigh_blackhole;
 233				if (n->nud_state & NUD_VALID)
 234					n->nud_state = NUD_NOARP;
 235				else
 236					n->nud_state = NUD_NONE;
 237				neigh_dbg(2, "neigh %p is stray\n", n);
 238			}
 239			write_unlock(&n->lock);
 240			neigh_cleanup_and_release(n);
 241		}
 242	}
 243}
 244
 245void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 246{
 247	write_lock_bh(&tbl->lock);
 248	neigh_flush_dev(tbl, dev);
 249	write_unlock_bh(&tbl->lock);
 250}
 251EXPORT_SYMBOL(neigh_changeaddr);
 252
 253int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 254{
 255	write_lock_bh(&tbl->lock);
 256	neigh_flush_dev(tbl, dev);
 257	pneigh_ifdown(tbl, dev);
 258	write_unlock_bh(&tbl->lock);
 259
 260	del_timer_sync(&tbl->proxy_timer);
 261	pneigh_queue_purge(&tbl->proxy_queue);
 262	return 0;
 263}
 264EXPORT_SYMBOL(neigh_ifdown);
 265
 266static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 267{
 268	struct neighbour *n = NULL;
 269	unsigned long now = jiffies;
 270	int entries;
 271
 272	entries = atomic_inc_return(&tbl->entries) - 1;
 273	if (entries >= tbl->gc_thresh3 ||
 274	    (entries >= tbl->gc_thresh2 &&
 275	     time_after(now, tbl->last_flush + 5 * HZ))) {
 276		if (!neigh_forced_gc(tbl) &&
 277		    entries >= tbl->gc_thresh3) {
 278			net_info_ratelimited("%s: neighbor table overflow!\n",
 279					     tbl->id);
 280			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 281			goto out_entries;
 282		}
 283	}
 284
 285	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 286	if (!n)
 287		goto out_entries;
 288
 289	__skb_queue_head_init(&n->arp_queue);
 290	rwlock_init(&n->lock);
 291	seqlock_init(&n->ha_lock);
 292	n->updated	  = n->used = now;
 293	n->nud_state	  = NUD_NONE;
 294	n->output	  = neigh_blackhole;
 295	seqlock_init(&n->hh.hh_lock);
 296	n->parms	  = neigh_parms_clone(&tbl->parms);
 297	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
 298
 299	NEIGH_CACHE_STAT_INC(tbl, allocs);
 300	n->tbl		  = tbl;
 301	atomic_set(&n->refcnt, 1);
 302	n->dead		  = 1;
 303out:
 304	return n;
 305
 306out_entries:
 307	atomic_dec(&tbl->entries);
 308	goto out;
 309}
 310
 311static void neigh_get_hash_rnd(u32 *x)
 312{
 313	get_random_bytes(x, sizeof(*x));
 314	*x |= 1;
 315}
 316
 317static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 318{
 319	size_t size = (1 << shift) * sizeof(struct neighbour *);
 320	struct neigh_hash_table *ret;
 321	struct neighbour __rcu **buckets;
 322	int i;
 323
 324	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 325	if (!ret)
 326		return NULL;
 327	if (size <= PAGE_SIZE)
 328		buckets = kzalloc(size, GFP_ATOMIC);
 329	else
 330		buckets = (struct neighbour __rcu **)
 331			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 332					   get_order(size));
 333	if (!buckets) {
 334		kfree(ret);
 335		return NULL;
 336	}
 337	ret->hash_buckets = buckets;
 338	ret->hash_shift = shift;
 339	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 340		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 341	return ret;
 342}
 343
 344static void neigh_hash_free_rcu(struct rcu_head *head)
 345{
 346	struct neigh_hash_table *nht = container_of(head,
 347						    struct neigh_hash_table,
 348						    rcu);
 349	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 350	struct neighbour __rcu **buckets = nht->hash_buckets;
 351
 352	if (size <= PAGE_SIZE)
 353		kfree(buckets);
 354	else
 355		free_pages((unsigned long)buckets, get_order(size));
 356	kfree(nht);
 357}
 358
 359static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 360						unsigned long new_shift)
 361{
 362	unsigned int i, hash;
 363	struct neigh_hash_table *new_nht, *old_nht;
 364
 365	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 366
 367	old_nht = rcu_dereference_protected(tbl->nht,
 368					    lockdep_is_held(&tbl->lock));
 369	new_nht = neigh_hash_alloc(new_shift);
 370	if (!new_nht)
 371		return old_nht;
 372
 373	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 374		struct neighbour *n, *next;
 375
 376		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 377						   lockdep_is_held(&tbl->lock));
 378		     n != NULL;
 379		     n = next) {
 380			hash = tbl->hash(n->primary_key, n->dev,
 381					 new_nht->hash_rnd);
 382
 383			hash >>= (32 - new_nht->hash_shift);
 384			next = rcu_dereference_protected(n->next,
 385						lockdep_is_held(&tbl->lock));
 386
 387			rcu_assign_pointer(n->next,
 388					   rcu_dereference_protected(
 389						new_nht->hash_buckets[hash],
 390						lockdep_is_held(&tbl->lock)));
 391			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 392		}
 393	}
 394
 395	rcu_assign_pointer(tbl->nht, new_nht);
 396	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 397	return new_nht;
 398}
 399
 400struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 401			       struct net_device *dev)
 402{
 403	struct neighbour *n;
 404
 405	NEIGH_CACHE_STAT_INC(tbl, lookups);
 406
 407	rcu_read_lock_bh();
 408	n = __neigh_lookup_noref(tbl, pkey, dev);
 409	if (n) {
 410		if (!atomic_inc_not_zero(&n->refcnt))
 411			n = NULL;
 412		NEIGH_CACHE_STAT_INC(tbl, hits);
 413	}
 414
 415	rcu_read_unlock_bh();
 416	return n;
 417}
 418EXPORT_SYMBOL(neigh_lookup);
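/*
 * Illustrative usage sketch (not part of the original file): neigh_lookup()
 * returns the entry with a reference already held (or NULL), so the caller
 * pairs it with neigh_release().  "key" stands for a protocol address in
 * the table's key format (e.g. a __be32 for IPv4/ARP).
 */
static int neigh_example_lookup(struct neigh_table *tbl, const void *key,
				struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (!n)
		return -ENOENT;
	/* ... inspect n->nud_state, n->ha, etc. (under n->lock as needed) ... */
	neigh_release(n);
	return 0;
}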
 419
 420struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 421				     const void *pkey)
 422{
 423	struct neighbour *n;
 424	int key_len = tbl->key_len;
 425	u32 hash_val;
 426	struct neigh_hash_table *nht;
 427
 428	NEIGH_CACHE_STAT_INC(tbl, lookups);
 429
 430	rcu_read_lock_bh();
 431	nht = rcu_dereference_bh(tbl->nht);
 432	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 433
 434	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 435	     n != NULL;
 436	     n = rcu_dereference_bh(n->next)) {
 437		if (!memcmp(n->primary_key, pkey, key_len) &&
 438		    net_eq(dev_net(n->dev), net)) {
 439			if (!atomic_inc_not_zero(&n->refcnt))
 440				n = NULL;
 441			NEIGH_CACHE_STAT_INC(tbl, hits);
 442			break;
 443		}
 444	}
 445
 446	rcu_read_unlock_bh();
 447	return n;
 448}
 449EXPORT_SYMBOL(neigh_lookup_nodev);
 450
 451struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 452				 struct net_device *dev, bool want_ref)
 453{
 454	u32 hash_val;
 455	int key_len = tbl->key_len;
 456	int error;
 457	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
 458	struct neigh_hash_table *nht;
 459
 460	if (!n) {
 461		rc = ERR_PTR(-ENOBUFS);
 462		goto out;
 463	}
 464
 465	memcpy(n->primary_key, pkey, key_len);
 466	n->dev = dev;
 467	dev_hold(dev);
 468
 469	/* Protocol specific setup. */
 470	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 471		rc = ERR_PTR(error);
 472		goto out_neigh_release;
 473	}
 474
 475	if (dev->netdev_ops->ndo_neigh_construct) {
 476		error = dev->netdev_ops->ndo_neigh_construct(n);
 477		if (error < 0) {
 478			rc = ERR_PTR(error);
 479			goto out_neigh_release;
 480		}
 481	}
 482
 483	/* Device specific setup. */
 484	if (n->parms->neigh_setup &&
 485	    (error = n->parms->neigh_setup(n)) < 0) {
 486		rc = ERR_PTR(error);
 487		goto out_neigh_release;
 488	}
 489
 490	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 491
 492	write_lock_bh(&tbl->lock);
 493	nht = rcu_dereference_protected(tbl->nht,
 494					lockdep_is_held(&tbl->lock));
 495
 496	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 497		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 498
 499	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 500
 501	if (n->parms->dead) {
 502		rc = ERR_PTR(-EINVAL);
 503		goto out_tbl_unlock;
 504	}
 505
 506	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 507					    lockdep_is_held(&tbl->lock));
 508	     n1 != NULL;
 509	     n1 = rcu_dereference_protected(n1->next,
 510			lockdep_is_held(&tbl->lock))) {
 511		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 512			if (want_ref)
 513				neigh_hold(n1);
 514			rc = n1;
 515			goto out_tbl_unlock;
 516		}
 517	}
 518
 519	n->dead = 0;
 520	if (want_ref)
 521		neigh_hold(n);
 522	rcu_assign_pointer(n->next,
 523			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 524						     lockdep_is_held(&tbl->lock)));
 525	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 526	write_unlock_bh(&tbl->lock);
 527	neigh_dbg(2, "neigh %p is created\n", n);
 528	rc = n;
 529out:
 530	return rc;
 531out_tbl_unlock:
 532	write_unlock_bh(&tbl->lock);
 533out_neigh_release:
 534	neigh_release(n);
 535	goto out;
 536}
 537EXPORT_SYMBOL(__neigh_create);
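/*
 * Illustrative sketch (not part of the original file): __neigh_create()
 * never returns NULL -- failures come back as ERR_PTR() values (e.g.
 * -ENOBUFS or -EINVAL above) -- so callers test with IS_ERR().  With
 * want_ref == true the new entry is returned with a reference held.
 */
static struct neighbour *neigh_example_create(struct neigh_table *tbl,
					      const void *key,
					      struct net_device *dev)
{
	struct neighbour *n = __neigh_create(tbl, key, dev, true);

	if (IS_ERR(n))
		return NULL;
	return n;		/* caller must neigh_release() eventually */
}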
 538
 539static u32 pneigh_hash(const void *pkey, int key_len)
 540{
 541	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 542	hash_val ^= (hash_val >> 16);
 543	hash_val ^= hash_val >> 8;
 544	hash_val ^= hash_val >> 4;
 545	hash_val &= PNEIGH_HASHMASK;
 546	return hash_val;
 547}
 548
 549static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 550					      struct net *net,
 551					      const void *pkey,
 552					      int key_len,
 553					      struct net_device *dev)
 554{
 555	while (n) {
 556		if (!memcmp(n->key, pkey, key_len) &&
 557		    net_eq(pneigh_net(n), net) &&
 558		    (n->dev == dev || !n->dev))
 559			return n;
 560		n = n->next;
 561	}
 562	return NULL;
 563}
 564
 565struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 566		struct net *net, const void *pkey, struct net_device *dev)
 567{
 568	int key_len = tbl->key_len;
 569	u32 hash_val = pneigh_hash(pkey, key_len);
 570
 571	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 572				 net, pkey, key_len, dev);
 573}
 574EXPORT_SYMBOL_GPL(__pneigh_lookup);
 575
 576struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 577				    struct net *net, const void *pkey,
 578				    struct net_device *dev, int creat)
 579{
 580	struct pneigh_entry *n;
 581	int key_len = tbl->key_len;
 582	u32 hash_val = pneigh_hash(pkey, key_len);
 583
 584	read_lock_bh(&tbl->lock);
 585	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 586			      net, pkey, key_len, dev);
 587	read_unlock_bh(&tbl->lock);
 588
 589	if (n || !creat)
 590		goto out;
 591
 592	ASSERT_RTNL();
 593
 594	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 595	if (!n)
 596		goto out;
 597
 598	write_pnet(&n->net, net);
 599	memcpy(n->key, pkey, key_len);
 600	n->dev = dev;
 601	if (dev)
 602		dev_hold(dev);
 603
 604	if (tbl->pconstructor && tbl->pconstructor(n)) {
 605		if (dev)
 606			dev_put(dev);
 607		kfree(n);
 608		n = NULL;
 609		goto out;
 610	}
 611
 612	write_lock_bh(&tbl->lock);
 613	n->next = tbl->phash_buckets[hash_val];
 614	tbl->phash_buckets[hash_val] = n;
 615	write_unlock_bh(&tbl->lock);
 616out:
 617	return n;
 618}
 619EXPORT_SYMBOL(pneigh_lookup);
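/*
 * Illustrative sketch (not part of the original file): creating a proxy
 * entry (creat != 0) allocates with GFP_KERNEL and requires the RTNL lock,
 * as the ASSERT_RTNL() above enforces; a plain lookup (creat == 0) does not.
 */
static int pneigh_example_add(struct neigh_table *tbl, struct net *net,
			      const void *key, struct net_device *dev)
{
	struct pneigh_entry *pn;

	ASSERT_RTNL();
	pn = pneigh_lookup(tbl, net, key, dev, 1);
	return pn ? 0 : -ENOBUFS;
}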
 620
 621
 622int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 623		  struct net_device *dev)
 624{
 625	struct pneigh_entry *n, **np;
 626	int key_len = tbl->key_len;
 627	u32 hash_val = pneigh_hash(pkey, key_len);
 628
 629	write_lock_bh(&tbl->lock);
 630	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 631	     np = &n->next) {
 632		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 633		    net_eq(pneigh_net(n), net)) {
 634			*np = n->next;
 635			write_unlock_bh(&tbl->lock);
 636			if (tbl->pdestructor)
 637				tbl->pdestructor(n);
 638			if (n->dev)
 639				dev_put(n->dev);
 640			kfree(n);
 641			return 0;
 642		}
 643	}
 644	write_unlock_bh(&tbl->lock);
 645	return -ENOENT;
 646}
 647
 648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 649{
 650	struct pneigh_entry *n, **np;
 651	u32 h;
 652
 653	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 654		np = &tbl->phash_buckets[h];
 655		while ((n = *np) != NULL) {
 656			if (!dev || n->dev == dev) {
 657				*np = n->next;
 658				if (tbl->pdestructor)
 659					tbl->pdestructor(n);
 660				if (n->dev)
 661					dev_put(n->dev);
 662				kfree(n);
 663				continue;
 664			}
 665			np = &n->next;
 666		}
 667	}
 668	return -ENOENT;
 669}
 670
 671static void neigh_parms_destroy(struct neigh_parms *parms);
 672
 673static inline void neigh_parms_put(struct neigh_parms *parms)
 674{
 675	if (atomic_dec_and_test(&parms->refcnt))
 676		neigh_parms_destroy(parms);
 677}
 678
 679/*
 680 *	neighbour must already be out of the table;
 681 *
 682 */
 683void neigh_destroy(struct neighbour *neigh)
 684{
 685	struct net_device *dev = neigh->dev;
 686
 687	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 688
 689	if (!neigh->dead) {
 690		pr_warn("Destroying alive neighbour %p\n", neigh);
 691		dump_stack();
 692		return;
 693	}
 694
 695	if (neigh_del_timer(neigh))
 696		pr_warn("Impossible event\n");
 697
 698	write_lock_bh(&neigh->lock);
 699	__skb_queue_purge(&neigh->arp_queue);
 700	write_unlock_bh(&neigh->lock);
 701	neigh->arp_queue_len_bytes = 0;
 702
 703	if (dev->netdev_ops->ndo_neigh_destroy)
 704		dev->netdev_ops->ndo_neigh_destroy(neigh);
 705
 706	dev_put(dev);
 707	neigh_parms_put(neigh->parms);
 708
 709	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 710
 711	atomic_dec(&neigh->tbl->entries);
 712	kfree_rcu(neigh, rcu);
 713}
 714EXPORT_SYMBOL(neigh_destroy);
 715
 716/* Neighbour state is suspicious;
 717   disable fast path.
 718
 719   Called with write_locked neigh.
 720 */
 721static void neigh_suspect(struct neighbour *neigh)
 722{
 723	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 724
 725	neigh->output = neigh->ops->output;
 726}
 727
 728/* Neighbour state is OK;
 729   enable fast path.
 730
 731   Called with write_locked neigh.
 732 */
 733static void neigh_connect(struct neighbour *neigh)
 734{
 735	neigh_dbg(2, "neigh %p is connected\n", neigh);
 736
 737	neigh->output = neigh->ops->connected_output;
 738}
 739
 740static void neigh_periodic_work(struct work_struct *work)
 741{
 742	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 743	struct neighbour *n;
 744	struct neighbour __rcu **np;
 745	unsigned int i;
 746	struct neigh_hash_table *nht;
 747
 748	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 749
 750	write_lock_bh(&tbl->lock);
 751	nht = rcu_dereference_protected(tbl->nht,
 752					lockdep_is_held(&tbl->lock));
 753
 754	/*
 755	 *	periodically recompute ReachableTime from random function
 756	 */
 757
 758	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 759		struct neigh_parms *p;
 760		tbl->last_rand = jiffies;
 761		list_for_each_entry(p, &tbl->parms_list, list)
 762			p->reachable_time =
 763				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 764	}
 765
 766	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 767		goto out;
 768
 769	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 770		np = &nht->hash_buckets[i];
 771
 772		while ((n = rcu_dereference_protected(*np,
 773				lockdep_is_held(&tbl->lock))) != NULL) {
 774			unsigned int state;
 775
 776			write_lock(&n->lock);
 777
 778			state = n->nud_state;
 779			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
 780				write_unlock(&n->lock);
 781				goto next_elt;
 782			}
 783
 784			if (time_before(n->used, n->confirmed))
 785				n->used = n->confirmed;
 786
 787			if (atomic_read(&n->refcnt) == 1 &&
 788			    (state == NUD_FAILED ||
 789			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 790				*np = n->next;
 791				n->dead = 1;
 792				write_unlock(&n->lock);
 793				neigh_cleanup_and_release(n);
 794				continue;
 795			}
 796			write_unlock(&n->lock);
 797
 798next_elt:
 799			np = &n->next;
 800		}
 801		/*
 802		 * It's fine to release lock here, even if hash table
 803		 * grows while we are preempted.
 804		 */
 805		write_unlock_bh(&tbl->lock);
 806		cond_resched();
 807		write_lock_bh(&tbl->lock);
 808		nht = rcu_dereference_protected(tbl->nht,
 809						lockdep_is_held(&tbl->lock));
 810	}
 811out:
 812	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 813	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 814	 * BASE_REACHABLE_TIME.
 815	 */
 816	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 817			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 818	write_unlock_bh(&tbl->lock);
 819}
 820
 821static __inline__ int neigh_max_probes(struct neighbour *n)
 822{
 823	struct neigh_parms *p = n->parms;
 824	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 825	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 826	        NEIGH_VAR(p, MCAST_PROBES));
 827}
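/*
 * Worked example (illustrative, not part of the original file): with, say,
 * UCAST_PROBES = 3, APP_PROBES = 0 and MCAST_PROBES = 3, an entry outside
 * NUD_PROBE may be probed up to 3 + 0 + 3 = 6 times; once it enters
 * NUD_PROBE the multicast term is replaced by MCAST_REPROBES.
 */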
 828
 829static void neigh_invalidate(struct neighbour *neigh)
 830	__releases(neigh->lock)
 831	__acquires(neigh->lock)
 832{
 833	struct sk_buff *skb;
 834
 835	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 836	neigh_dbg(2, "neigh %p is failed\n", neigh);
 837	neigh->updated = jiffies;
 838
  839	/* This is a delicate spot: report_unreachable is a complicated
  840	   routine.  In particular, it can hit this same neighbour entry!
  841
  842	   So we try to be careful and avoid an endless loop. --ANK
  843	 */
 844	while (neigh->nud_state == NUD_FAILED &&
 845	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 846		write_unlock(&neigh->lock);
 847		neigh->ops->error_report(neigh, skb);
 848		write_lock(&neigh->lock);
 849	}
 850	__skb_queue_purge(&neigh->arp_queue);
 851	neigh->arp_queue_len_bytes = 0;
 852}
 853
 854static void neigh_probe(struct neighbour *neigh)
 855	__releases(neigh->lock)
 856{
 857	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 858	/* keep skb alive even if arp_queue overflows */
 859	if (skb)
 860		skb = skb_clone(skb, GFP_ATOMIC);
 861	write_unlock(&neigh->lock);
 862	neigh->ops->solicit(neigh, skb);
 863	atomic_inc(&neigh->probes);
 864	kfree_skb(skb);
 865}
 866
 867/* Called when a timer expires for a neighbour entry. */
 868
 869static void neigh_timer_handler(unsigned long arg)
 870{
 871	unsigned long now, next;
 872	struct neighbour *neigh = (struct neighbour *)arg;
 873	unsigned int state;
 874	int notify = 0;
 875
 876	write_lock(&neigh->lock);
 877
 878	state = neigh->nud_state;
 879	now = jiffies;
 880	next = now + HZ;
 881
 882	if (!(state & NUD_IN_TIMER))
 883		goto out;
 884
 885	if (state & NUD_REACHABLE) {
 886		if (time_before_eq(now,
 887				   neigh->confirmed + neigh->parms->reachable_time)) {
 888			neigh_dbg(2, "neigh %p is still alive\n", neigh);
 889			next = neigh->confirmed + neigh->parms->reachable_time;
 890		} else if (time_before_eq(now,
 891					  neigh->used +
 892					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 893			neigh_dbg(2, "neigh %p is delayed\n", neigh);
 894			neigh->nud_state = NUD_DELAY;
 895			neigh->updated = jiffies;
 896			neigh_suspect(neigh);
 897			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
 898		} else {
 899			neigh_dbg(2, "neigh %p is suspected\n", neigh);
 900			neigh->nud_state = NUD_STALE;
 901			neigh->updated = jiffies;
 902			neigh_suspect(neigh);
 903			notify = 1;
 904		}
 905	} else if (state & NUD_DELAY) {
 906		if (time_before_eq(now,
 907				   neigh->confirmed +
 908				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 909			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
 910			neigh->nud_state = NUD_REACHABLE;
 911			neigh->updated = jiffies;
 912			neigh_connect(neigh);
 913			notify = 1;
 914			next = neigh->confirmed + neigh->parms->reachable_time;
 915		} else {
 916			neigh_dbg(2, "neigh %p is probed\n", neigh);
 917			neigh->nud_state = NUD_PROBE;
 918			neigh->updated = jiffies;
 919			atomic_set(&neigh->probes, 0);
 920			notify = 1;
 921			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 922		}
 923	} else {
 924		/* NUD_PROBE|NUD_INCOMPLETE */
 925		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 926	}
 927
 928	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 929	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 930		neigh->nud_state = NUD_FAILED;
 931		notify = 1;
 932		neigh_invalidate(neigh);
 933		goto out;
 934	}
 935
 936	if (neigh->nud_state & NUD_IN_TIMER) {
 937		if (time_before(next, jiffies + HZ/2))
 938			next = jiffies + HZ/2;
 939		if (!mod_timer(&neigh->timer, next))
 940			neigh_hold(neigh);
 941	}
 942	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 943		neigh_probe(neigh);
 944	} else {
 945out:
 946		write_unlock(&neigh->lock);
 947	}
 948
 949	if (notify)
 950		neigh_update_notify(neigh);
 951
 952	neigh_release(neigh);
 953}
 954
 955int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 956{
 957	int rc;
 958	bool immediate_probe = false;
 959
 960	write_lock_bh(&neigh->lock);
 961
 962	rc = 0;
 963	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 964		goto out_unlock_bh;
 965	if (neigh->dead)
 966		goto out_dead;
 967
 968	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 969		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
 970		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
 971			unsigned long next, now = jiffies;
 972
 973			atomic_set(&neigh->probes,
 974				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
 975			neigh->nud_state     = NUD_INCOMPLETE;
 976			neigh->updated = now;
 977			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
 978					 HZ/2);
 979			neigh_add_timer(neigh, next);
 980			immediate_probe = true;
 981		} else {
 982			neigh->nud_state = NUD_FAILED;
 983			neigh->updated = jiffies;
 984			write_unlock_bh(&neigh->lock);
 985
 986			kfree_skb(skb);
 987			return 1;
 988		}
 989	} else if (neigh->nud_state & NUD_STALE) {
 990		neigh_dbg(2, "neigh %p is delayed\n", neigh);
 991		neigh->nud_state = NUD_DELAY;
 992		neigh->updated = jiffies;
 993		neigh_add_timer(neigh, jiffies +
 994				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
 995	}
 996
 997	if (neigh->nud_state == NUD_INCOMPLETE) {
 998		if (skb) {
 999			while (neigh->arp_queue_len_bytes + skb->truesize >
1000			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1001				struct sk_buff *buff;
1002
1003				buff = __skb_dequeue(&neigh->arp_queue);
1004				if (!buff)
1005					break;
1006				neigh->arp_queue_len_bytes -= buff->truesize;
1007				kfree_skb(buff);
1008				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1009			}
1010			skb_dst_force(skb);
1011			__skb_queue_tail(&neigh->arp_queue, skb);
1012			neigh->arp_queue_len_bytes += skb->truesize;
1013		}
1014		rc = 1;
1015	}
1016out_unlock_bh:
1017	if (immediate_probe)
1018		neigh_probe(neigh);
1019	else
1020		write_unlock(&neigh->lock);
1021	local_bh_enable();
1022	return rc;
1023
1024out_dead:
1025	if (neigh->nud_state & NUD_STALE)
1026		goto out_unlock_bh;
1027	write_unlock_bh(&neigh->lock);
1028	kfree_skb(skb);
1029	return 1;
1030}
1031EXPORT_SYMBOL(__neigh_event_send);
1032
1033static void neigh_update_hhs(struct neighbour *neigh)
1034{
1035	struct hh_cache *hh;
1036	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1037		= NULL;
1038
1039	if (neigh->dev->header_ops)
1040		update = neigh->dev->header_ops->cache_update;
1041
1042	if (update) {
1043		hh = &neigh->hh;
1044		if (hh->hh_len) {
1045			write_seqlock_bh(&hh->hh_lock);
1046			update(hh, neigh->dev, neigh->ha);
1047			write_sequnlock_bh(&hh->hh_lock);
1048		}
1049	}
1050}
1051
1052
1053
 1054/* Generic update routine.
 1055   -- lladdr is the new lladdr, or NULL if it is not supplied.
 1056   -- new    is the new state.
 1057   -- flags
 1058	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
 1059				if it is different.
 1060	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
 1061				lladdr instead of overriding it
 1062				if it is different.
 1063				It also allows retaining the current state
 1064				if lladdr is unchanged.
 1065	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1066
 1067	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
 1068				NTF_ROUTER flag.
 1069	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
 1070				to be a router.
 1071
 1072   Caller MUST hold a reference count on the entry.
 1073 */
1074
1075int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1076		 u32 flags)
1077{
1078	u8 old;
1079	int err;
1080	int notify = 0;
1081	struct net_device *dev;
1082	int update_isrouter = 0;
1083
1084	write_lock_bh(&neigh->lock);
1085
1086	dev    = neigh->dev;
1087	old    = neigh->nud_state;
1088	err    = -EPERM;
1089
1090	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1091	    (old & (NUD_NOARP | NUD_PERMANENT)))
1092		goto out;
1093	if (neigh->dead)
1094		goto out;
1095
1096	if (!(new & NUD_VALID)) {
1097		neigh_del_timer(neigh);
1098		if (old & NUD_CONNECTED)
1099			neigh_suspect(neigh);
1100		neigh->nud_state = new;
1101		err = 0;
1102		notify = old & NUD_VALID;
1103		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1104		    (new & NUD_FAILED)) {
1105			neigh_invalidate(neigh);
1106			notify = 1;
1107		}
1108		goto out;
1109	}
1110
1111	/* Compare new lladdr with cached one */
1112	if (!dev->addr_len) {
1113		/* First case: device needs no address. */
1114		lladdr = neigh->ha;
1115	} else if (lladdr) {
1116		/* The second case: if something is already cached
1117		   and a new address is proposed:
1118		   - compare new & old
1119		   - if they are different, check override flag
1120		 */
1121		if ((old & NUD_VALID) &&
1122		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1123			lladdr = neigh->ha;
1124	} else {
1125		/* No address is supplied; if we know something,
1126		   use it, otherwise discard the request.
1127		 */
1128		err = -EINVAL;
1129		if (!(old & NUD_VALID))
1130			goto out;
1131		lladdr = neigh->ha;
1132	}
1133
1134	if (new & NUD_CONNECTED)
1135		neigh->confirmed = jiffies;
1136	neigh->updated = jiffies;
1137
 1138	/* If the entry was valid and the address has not changed,
 1139	   do not change the entry state if the new one is STALE.
 1140	 */
1141	err = 0;
1142	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1143	if (old & NUD_VALID) {
1144		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1145			update_isrouter = 0;
1146			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1147			    (old & NUD_CONNECTED)) {
1148				lladdr = neigh->ha;
1149				new = NUD_STALE;
1150			} else
1151				goto out;
1152		} else {
1153			if (lladdr == neigh->ha && new == NUD_STALE &&
1154			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1155			     (old & NUD_CONNECTED))
1156			    )
1157				new = old;
1158		}
1159	}
1160
1161	if (new != old) {
1162		neigh_del_timer(neigh);
1163		if (new & NUD_PROBE)
1164			atomic_set(&neigh->probes, 0);
1165		if (new & NUD_IN_TIMER)
1166			neigh_add_timer(neigh, (jiffies +
1167						((new & NUD_REACHABLE) ?
1168						 neigh->parms->reachable_time :
1169						 0)));
1170		neigh->nud_state = new;
1171		notify = 1;
1172	}
1173
1174	if (lladdr != neigh->ha) {
1175		write_seqlock(&neigh->ha_lock);
1176		memcpy(&neigh->ha, lladdr, dev->addr_len);
1177		write_sequnlock(&neigh->ha_lock);
1178		neigh_update_hhs(neigh);
1179		if (!(new & NUD_CONNECTED))
1180			neigh->confirmed = jiffies -
1181				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1182		notify = 1;
1183	}
1184	if (new == old)
1185		goto out;
1186	if (new & NUD_CONNECTED)
1187		neigh_connect(neigh);
1188	else
1189		neigh_suspect(neigh);
1190	if (!(old & NUD_VALID)) {
1191		struct sk_buff *skb;
1192
 1193		/* Again: avoid an endless loop if something went wrong */
1194
1195		while (neigh->nud_state & NUD_VALID &&
1196		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197			struct dst_entry *dst = skb_dst(skb);
1198			struct neighbour *n2, *n1 = neigh;
1199			write_unlock_bh(&neigh->lock);
1200
1201			rcu_read_lock();
1202
1203			/* Why not just use 'neigh' as-is?  The problem is that
1204			 * things such as shaper, eql, and sch_teql can end up
1205			 * using alternative, different, neigh objects to output
1206			 * the packet in the output path.  So what we need to do
1207			 * here is re-lookup the top-level neigh in the path so
1208			 * we can reinject the packet there.
1209			 */
1210			n2 = NULL;
1211			if (dst) {
1212				n2 = dst_neigh_lookup_skb(dst, skb);
1213				if (n2)
1214					n1 = n2;
1215			}
1216			n1->output(n1, skb);
1217			if (n2)
1218				neigh_release(n2);
1219			rcu_read_unlock();
1220
1221			write_lock_bh(&neigh->lock);
1222		}
1223		__skb_queue_purge(&neigh->arp_queue);
1224		neigh->arp_queue_len_bytes = 0;
1225	}
1226out:
1227	if (update_isrouter) {
1228		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1229			(neigh->flags | NTF_ROUTER) :
1230			(neigh->flags & ~NTF_ROUTER);
1231	}
1232	write_unlock_bh(&neigh->lock);
1233
1234	if (notify)
1235		neigh_update_notify(neigh);
1236
1237	return err;
1238}
1239EXPORT_SYMBOL(neigh_update);
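/*
 * Illustrative sketch (not part of the original file): an administrative
 * update in the style of the netlink handlers further below -- force an
 * entry into NUD_FAILED regardless of its current state.  The caller must
 * hold a reference on the entry, as required above.
 */
static int neigh_example_fail(struct neighbour *neigh)
{
	return neigh_update(neigh, NULL, NUD_FAILED,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}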
1240
1241/* Update the neigh to listen temporarily for probe responses, even if it is
1242 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1243 */
1244void __neigh_set_probe_once(struct neighbour *neigh)
1245{
1246	if (neigh->dead)
1247		return;
1248	neigh->updated = jiffies;
1249	if (!(neigh->nud_state & NUD_FAILED))
1250		return;
1251	neigh->nud_state = NUD_INCOMPLETE;
1252	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253	neigh_add_timer(neigh,
1254			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255}
1256EXPORT_SYMBOL(__neigh_set_probe_once);
1257
1258struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1259				 u8 *lladdr, void *saddr,
1260				 struct net_device *dev)
1261{
1262	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1263						 lladdr || !dev->addr_len);
1264	if (neigh)
1265		neigh_update(neigh, lladdr, NUD_STALE,
1266			     NEIGH_UPDATE_F_OVERRIDE);
1267	return neigh;
1268}
1269EXPORT_SYMBOL(neigh_event_ns);
1270
1271/* called with read_lock_bh(&n->lock); */
1272static void neigh_hh_init(struct neighbour *n)
1273{
1274	struct net_device *dev = n->dev;
1275	__be16 prot = n->tbl->protocol;
1276	struct hh_cache	*hh = &n->hh;
1277
1278	write_lock_bh(&n->lock);
1279
1280	/* Only one thread can come in here and initialize the
1281	 * hh_cache entry.
1282	 */
1283	if (!hh->hh_len)
1284		dev->header_ops->cache(n, hh, prot);
1285
1286	write_unlock_bh(&n->lock);
1287}
1288
1289/* Slow and careful. */
1290
1291int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1292{
1293	int rc = 0;
1294
1295	if (!neigh_event_send(neigh, skb)) {
1296		int err;
1297		struct net_device *dev = neigh->dev;
1298		unsigned int seq;
1299
1300		if (dev->header_ops->cache && !neigh->hh.hh_len)
1301			neigh_hh_init(neigh);
1302
1303		do {
1304			__skb_pull(skb, skb_network_offset(skb));
1305			seq = read_seqbegin(&neigh->ha_lock);
1306			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1307					      neigh->ha, NULL, skb->len);
1308		} while (read_seqretry(&neigh->ha_lock, seq));
1309
1310		if (err >= 0)
1311			rc = dev_queue_xmit(skb);
1312		else
1313			goto out_kfree_skb;
1314	}
1315out:
1316	return rc;
1317out_kfree_skb:
1318	rc = -EINVAL;
1319	kfree_skb(skb);
1320	goto out;
1321}
1322EXPORT_SYMBOL(neigh_resolve_output);
1323
1324/* As fast as possible without hh cache */
1325
1326int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1327{
1328	struct net_device *dev = neigh->dev;
1329	unsigned int seq;
1330	int err;
1331
1332	do {
1333		__skb_pull(skb, skb_network_offset(skb));
1334		seq = read_seqbegin(&neigh->ha_lock);
1335		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1336				      neigh->ha, NULL, skb->len);
1337	} while (read_seqretry(&neigh->ha_lock, seq));
1338
1339	if (err >= 0)
1340		err = dev_queue_xmit(skb);
1341	else {
1342		err = -EINVAL;
1343		kfree_skb(skb);
1344	}
1345	return err;
1346}
1347EXPORT_SYMBOL(neigh_connected_output);
1348
1349int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1350{
1351	return dev_queue_xmit(skb);
1352}
1353EXPORT_SYMBOL(neigh_direct_output);
1354
1355static void neigh_proxy_process(unsigned long arg)
1356{
1357	struct neigh_table *tbl = (struct neigh_table *)arg;
1358	long sched_next = 0;
1359	unsigned long now = jiffies;
1360	struct sk_buff *skb, *n;
1361
1362	spin_lock(&tbl->proxy_queue.lock);
1363
1364	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1365		long tdif = NEIGH_CB(skb)->sched_next - now;
1366
1367		if (tdif <= 0) {
1368			struct net_device *dev = skb->dev;
1369
1370			__skb_unlink(skb, &tbl->proxy_queue);
1371			if (tbl->proxy_redo && netif_running(dev)) {
1372				rcu_read_lock();
1373				tbl->proxy_redo(skb);
1374				rcu_read_unlock();
1375			} else {
1376				kfree_skb(skb);
1377			}
1378
1379			dev_put(dev);
1380		} else if (!sched_next || tdif < sched_next)
1381			sched_next = tdif;
1382	}
1383	del_timer(&tbl->proxy_timer);
1384	if (sched_next)
1385		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1386	spin_unlock(&tbl->proxy_queue.lock);
1387}
1388
1389void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1390		    struct sk_buff *skb)
1391{
1392	unsigned long now = jiffies;
1393
1394	unsigned long sched_next = now + (prandom_u32() %
1395					  NEIGH_VAR(p, PROXY_DELAY));
1396
1397	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1398		kfree_skb(skb);
1399		return;
1400	}
1401
1402	NEIGH_CB(skb)->sched_next = sched_next;
1403	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1404
1405	spin_lock(&tbl->proxy_queue.lock);
1406	if (del_timer(&tbl->proxy_timer)) {
1407		if (time_before(tbl->proxy_timer.expires, sched_next))
1408			sched_next = tbl->proxy_timer.expires;
1409	}
1410	skb_dst_drop(skb);
1411	dev_hold(skb->dev);
1412	__skb_queue_tail(&tbl->proxy_queue, skb);
1413	mod_timer(&tbl->proxy_timer, sched_next);
1414	spin_unlock(&tbl->proxy_queue.lock);
1415}
1416EXPORT_SYMBOL(pneigh_enqueue);
1417
1418static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1419						      struct net *net, int ifindex)
1420{
1421	struct neigh_parms *p;
1422
1423	list_for_each_entry(p, &tbl->parms_list, list) {
1424		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1425		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1426			return p;
1427	}
1428
1429	return NULL;
1430}
1431
1432struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1433				      struct neigh_table *tbl)
1434{
1435	struct neigh_parms *p;
1436	struct net *net = dev_net(dev);
1437	const struct net_device_ops *ops = dev->netdev_ops;
1438
1439	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1440	if (p) {
1441		p->tbl		  = tbl;
1442		atomic_set(&p->refcnt, 1);
1443		p->reachable_time =
1444				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1445		dev_hold(dev);
1446		p->dev = dev;
1447		write_pnet(&p->net, net);
1448		p->sysctl_table = NULL;
1449
1450		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1451			dev_put(dev);
1452			kfree(p);
1453			return NULL;
1454		}
1455
1456		write_lock_bh(&tbl->lock);
1457		list_add(&p->list, &tbl->parms.list);
1458		write_unlock_bh(&tbl->lock);
1459
1460		neigh_parms_data_state_cleanall(p);
1461	}
1462	return p;
1463}
1464EXPORT_SYMBOL(neigh_parms_alloc);
1465
1466static void neigh_rcu_free_parms(struct rcu_head *head)
1467{
1468	struct neigh_parms *parms =
1469		container_of(head, struct neigh_parms, rcu_head);
1470
1471	neigh_parms_put(parms);
1472}
1473
1474void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1475{
1476	if (!parms || parms == &tbl->parms)
1477		return;
1478	write_lock_bh(&tbl->lock);
1479	list_del(&parms->list);
1480	parms->dead = 1;
1481	write_unlock_bh(&tbl->lock);
1482	if (parms->dev)
1483		dev_put(parms->dev);
1484	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1485}
1486EXPORT_SYMBOL(neigh_parms_release);
1487
1488static void neigh_parms_destroy(struct neigh_parms *parms)
1489{
1490	kfree(parms);
1491}
1492
1493static struct lock_class_key neigh_table_proxy_queue_class;
1494
1495static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1496
1497void neigh_table_init(int index, struct neigh_table *tbl)
1498{
1499	unsigned long now = jiffies;
1500	unsigned long phsize;
1501
1502	INIT_LIST_HEAD(&tbl->parms_list);
1503	list_add(&tbl->parms.list, &tbl->parms_list);
1504	write_pnet(&tbl->parms.net, &init_net);
1505	atomic_set(&tbl->parms.refcnt, 1);
1506	tbl->parms.reachable_time =
1507			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1508
1509	tbl->stats = alloc_percpu(struct neigh_statistics);
1510	if (!tbl->stats)
1511		panic("cannot create neighbour cache statistics");
1512
1513#ifdef CONFIG_PROC_FS
1514	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1515			      &neigh_stat_seq_fops, tbl))
1516		panic("cannot create neighbour proc dir entry");
1517#endif
1518
1519	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1520
1521	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1522	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1523
1524	if (!tbl->nht || !tbl->phash_buckets)
1525		panic("cannot allocate neighbour cache hashes");
1526
1527	if (!tbl->entry_size)
1528		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1529					tbl->key_len, NEIGH_PRIV_ALIGN);
1530	else
1531		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1532
1533	rwlock_init(&tbl->lock);
1534	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1535	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1536			tbl->parms.reachable_time);
1537	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1538	skb_queue_head_init_class(&tbl->proxy_queue,
1539			&neigh_table_proxy_queue_class);
1540
1541	tbl->last_flush = now;
1542	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1543
1544	neigh_tables[index] = tbl;
1545}
1546EXPORT_SYMBOL(neigh_table_init);
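/*
 * Usage note (illustrative, not part of the original file): each protocol
 * owns one statically defined table and registers it once in its fixed
 * slot, e.g. the IPv4 ARP code does
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and a protocol that can be unloaded (e.g. IPv6 ND) undoes this with
 * neigh_table_clear().
 */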
1547
1548int neigh_table_clear(int index, struct neigh_table *tbl)
1549{
1550	neigh_tables[index] = NULL;
 1551	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1552	cancel_delayed_work_sync(&tbl->gc_work);
1553	del_timer_sync(&tbl->proxy_timer);
1554	pneigh_queue_purge(&tbl->proxy_queue);
1555	neigh_ifdown(tbl, NULL);
1556	if (atomic_read(&tbl->entries))
1557		pr_crit("neighbour leakage\n");
1558
1559	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1560		 neigh_hash_free_rcu);
1561	tbl->nht = NULL;
1562
1563	kfree(tbl->phash_buckets);
1564	tbl->phash_buckets = NULL;
1565
1566	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1567
1568	free_percpu(tbl->stats);
1569	tbl->stats = NULL;
1570
1571	return 0;
1572}
1573EXPORT_SYMBOL(neigh_table_clear);
1574
1575static struct neigh_table *neigh_find_table(int family)
1576{
1577	struct neigh_table *tbl = NULL;
1578
1579	switch (family) {
1580	case AF_INET:
1581		tbl = neigh_tables[NEIGH_ARP_TABLE];
1582		break;
1583	case AF_INET6:
1584		tbl = neigh_tables[NEIGH_ND_TABLE];
1585		break;
1586	case AF_DECnet:
1587		tbl = neigh_tables[NEIGH_DN_TABLE];
1588		break;
1589	}
1590
1591	return tbl;
1592}
1593
1594static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1595{
1596	struct net *net = sock_net(skb->sk);
1597	struct ndmsg *ndm;
1598	struct nlattr *dst_attr;
1599	struct neigh_table *tbl;
1600	struct neighbour *neigh;
1601	struct net_device *dev = NULL;
1602	int err = -EINVAL;
1603
1604	ASSERT_RTNL();
1605	if (nlmsg_len(nlh) < sizeof(*ndm))
1606		goto out;
1607
1608	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1609	if (dst_attr == NULL)
1610		goto out;
1611
1612	ndm = nlmsg_data(nlh);
1613	if (ndm->ndm_ifindex) {
1614		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1615		if (dev == NULL) {
1616			err = -ENODEV;
1617			goto out;
1618		}
1619	}
1620
1621	tbl = neigh_find_table(ndm->ndm_family);
1622	if (tbl == NULL)
1623		return -EAFNOSUPPORT;
1624
1625	if (nla_len(dst_attr) < tbl->key_len)
1626		goto out;
1627
1628	if (ndm->ndm_flags & NTF_PROXY) {
1629		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1630		goto out;
1631	}
1632
1633	if (dev == NULL)
1634		goto out;
1635
1636	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1637	if (neigh == NULL) {
1638		err = -ENOENT;
1639		goto out;
1640	}
1641
1642	err = neigh_update(neigh, NULL, NUD_FAILED,
1643			   NEIGH_UPDATE_F_OVERRIDE |
1644			   NEIGH_UPDATE_F_ADMIN);
1645	neigh_release(neigh);
1646
1647out:
1648	return err;
1649}
1650
1651static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1652{
1653	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1654	struct net *net = sock_net(skb->sk);
1655	struct ndmsg *ndm;
1656	struct nlattr *tb[NDA_MAX+1];
1657	struct neigh_table *tbl;
1658	struct net_device *dev = NULL;
1659	struct neighbour *neigh;
1660	void *dst, *lladdr;
1661	int err;
1662
1663	ASSERT_RTNL();
1664	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1665	if (err < 0)
1666		goto out;
1667
1668	err = -EINVAL;
1669	if (tb[NDA_DST] == NULL)
1670		goto out;
1671
1672	ndm = nlmsg_data(nlh);
1673	if (ndm->ndm_ifindex) {
1674		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1675		if (dev == NULL) {
1676			err = -ENODEV;
1677			goto out;
1678		}
1679
1680		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1681			goto out;
1682	}
1683
1684	tbl = neigh_find_table(ndm->ndm_family);
1685	if (tbl == NULL)
1686		return -EAFNOSUPPORT;
1687
1688	if (nla_len(tb[NDA_DST]) < tbl->key_len)
1689		goto out;
1690	dst = nla_data(tb[NDA_DST]);
1691	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1692
1693	if (ndm->ndm_flags & NTF_PROXY) {
1694		struct pneigh_entry *pn;
1695
1696		err = -ENOBUFS;
1697		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1698		if (pn) {
1699			pn->flags = ndm->ndm_flags;
1700			err = 0;
1701		}
1702		goto out;
1703	}
1704
1705	if (dev == NULL)
1706		goto out;
1707
1708	neigh = neigh_lookup(tbl, dst, dev);
1709	if (neigh == NULL) {
1710		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1711			err = -ENOENT;
1712			goto out;
1713		}
1714
1715		neigh = __neigh_lookup_errno(tbl, dst, dev);
1716		if (IS_ERR(neigh)) {
1717			err = PTR_ERR(neigh);
1718			goto out;
1719		}
1720	} else {
1721		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1722			err = -EEXIST;
1723			neigh_release(neigh);
1724			goto out;
1725		}
1726
1727		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1728			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1729	}
1730
1731	if (ndm->ndm_flags & NTF_USE) {
1732		neigh_event_send(neigh, NULL);
1733		err = 0;
1734	} else
1735		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1736	neigh_release(neigh);
1737
1738out:
1739	return err;
1740}
1741
1742static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1743{
1744	struct nlattr *nest;
1745
1746	nest = nla_nest_start(skb, NDTA_PARMS);
1747	if (nest == NULL)
1748		return -ENOBUFS;
1749
1750	if ((parms->dev &&
1751	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1752	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1753	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1754			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
 1755	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1756	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1757			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1758	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1759	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1760	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1761			NEIGH_VAR(parms, UCAST_PROBES)) ||
1762	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1763			NEIGH_VAR(parms, MCAST_PROBES)) ||
1764	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1765			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1766	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1767	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1768			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1769	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1770			  NEIGH_VAR(parms, GC_STALETIME)) ||
1771	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1772			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1773	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1774			  NEIGH_VAR(parms, RETRANS_TIME)) ||
1775	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1776			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1777	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1778			  NEIGH_VAR(parms, PROXY_DELAY)) ||
1779	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1780			  NEIGH_VAR(parms, LOCKTIME)))
1781		goto nla_put_failure;
1782	return nla_nest_end(skb, nest);
1783
1784nla_put_failure:
1785	nla_nest_cancel(skb, nest);
1786	return -EMSGSIZE;
1787}
1788
1789static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1790			      u32 pid, u32 seq, int type, int flags)
1791{
1792	struct nlmsghdr *nlh;
1793	struct ndtmsg *ndtmsg;
1794
1795	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1796	if (nlh == NULL)
1797		return -EMSGSIZE;
1798
1799	ndtmsg = nlmsg_data(nlh);
1800
1801	read_lock_bh(&tbl->lock);
1802	ndtmsg->ndtm_family = tbl->family;
1803	ndtmsg->ndtm_pad1   = 0;
1804	ndtmsg->ndtm_pad2   = 0;
1805
1806	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1807	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1808	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1809	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1810	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1811		goto nla_put_failure;
1812	{
1813		unsigned long now = jiffies;
1814		unsigned int flush_delta = now - tbl->last_flush;
1815		unsigned int rand_delta = now - tbl->last_rand;
1816		struct neigh_hash_table *nht;
1817		struct ndt_config ndc = {
1818			.ndtc_key_len		= tbl->key_len,
1819			.ndtc_entry_size	= tbl->entry_size,
1820			.ndtc_entries		= atomic_read(&tbl->entries),
1821			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1822			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1823			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1824		};
1825
1826		rcu_read_lock_bh();
1827		nht = rcu_dereference_bh(tbl->nht);
1828		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1829		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1830		rcu_read_unlock_bh();
1831
1832		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1833			goto nla_put_failure;
1834	}
1835
1836	{
1837		int cpu;
1838		struct ndt_stats ndst;
1839
1840		memset(&ndst, 0, sizeof(ndst));
1841
1842		for_each_possible_cpu(cpu) {
1843			struct neigh_statistics	*st;
1844
1845			st = per_cpu_ptr(tbl->stats, cpu);
1846			ndst.ndts_allocs		+= st->allocs;
1847			ndst.ndts_destroys		+= st->destroys;
1848			ndst.ndts_hash_grows		+= st->hash_grows;
1849			ndst.ndts_res_failed		+= st->res_failed;
1850			ndst.ndts_lookups		+= st->lookups;
1851			ndst.ndts_hits			+= st->hits;
1852			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1853			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1854			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1855			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1856			ndst.ndts_table_fulls		+= st->table_fulls;
1857		}
1858
1859		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1860			goto nla_put_failure;
1861	}
1862
1863	BUG_ON(tbl->parms.dev);
1864	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1865		goto nla_put_failure;
1866
1867	read_unlock_bh(&tbl->lock);
1868	nlmsg_end(skb, nlh);
1869	return 0;
1870
1871nla_put_failure:
1872	read_unlock_bh(&tbl->lock);
1873	nlmsg_cancel(skb, nlh);
1874	return -EMSGSIZE;
1875}
1876
1877static int neightbl_fill_param_info(struct sk_buff *skb,
1878				    struct neigh_table *tbl,
1879				    struct neigh_parms *parms,
1880				    u32 pid, u32 seq, int type,
1881				    unsigned int flags)
1882{
1883	struct ndtmsg *ndtmsg;
1884	struct nlmsghdr *nlh;
1885
1886	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1887	if (nlh == NULL)
1888		return -EMSGSIZE;
1889
1890	ndtmsg = nlmsg_data(nlh);
1891
1892	read_lock_bh(&tbl->lock);
1893	ndtmsg->ndtm_family = tbl->family;
1894	ndtmsg->ndtm_pad1   = 0;
1895	ndtmsg->ndtm_pad2   = 0;
1896
1897	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1898	    neightbl_fill_parms(skb, parms) < 0)
1899		goto errout;
1900
1901	read_unlock_bh(&tbl->lock);
1902	nlmsg_end(skb, nlh);
1903	return 0;
1904errout:
1905	read_unlock_bh(&tbl->lock);
1906	nlmsg_cancel(skb, nlh);
1907	return -EMSGSIZE;
1908}
1909
1910static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1911	[NDTA_NAME]		= { .type = NLA_STRING },
1912	[NDTA_THRESH1]		= { .type = NLA_U32 },
1913	[NDTA_THRESH2]		= { .type = NLA_U32 },
1914	[NDTA_THRESH3]		= { .type = NLA_U32 },
1915	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1916	[NDTA_PARMS]		= { .type = NLA_NESTED },
1917};
1918
1919static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1920	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1921	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1922	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1923	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1924	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1925	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1926	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1927	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1928	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1929	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1930	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1931	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1932	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1933	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1934};
1935
1936static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1937{
1938	struct net *net = sock_net(skb->sk);
1939	struct neigh_table *tbl;
1940	struct ndtmsg *ndtmsg;
1941	struct nlattr *tb[NDTA_MAX+1];
1942	bool found = false;
1943	int err, tidx;
1944
1945	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1946			  nl_neightbl_policy);
1947	if (err < 0)
1948		goto errout;
1949
1950	if (tb[NDTA_NAME] == NULL) {
1951		err = -EINVAL;
1952		goto errout;
1953	}
1954
1955	ndtmsg = nlmsg_data(nlh);
1956
1957	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1958		tbl = neigh_tables[tidx];
1959		if (!tbl)
1960			continue;
1961		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1962			continue;
1963		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1964			found = true;
1965			break;
1966		}
1967	}
1968
1969	if (!found)
1970		return -ENOENT;
1971
1972	/*
1973	 * We acquire tbl->lock to be nice to the periodic timers and
1974	 * make sure they always see a consistent set of values.
1975	 */
1976	write_lock_bh(&tbl->lock);
1977
1978	if (tb[NDTA_PARMS]) {
1979		struct nlattr *tbp[NDTPA_MAX+1];
1980		struct neigh_parms *p;
1981		int i, ifindex = 0;
1982
1983		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1984				       nl_ntbl_parm_policy);
1985		if (err < 0)
1986			goto errout_tbl_lock;
1987
1988		if (tbp[NDTPA_IFINDEX])
1989			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1990
1991		p = lookup_neigh_parms(tbl, net, ifindex);
1992		if (p == NULL) {
1993			err = -ENOENT;
1994			goto errout_tbl_lock;
1995		}
1996
1997		for (i = 1; i <= NDTPA_MAX; i++) {
1998			if (tbp[i] == NULL)
1999				continue;
2000
2001			switch (i) {
2002			case NDTPA_QUEUE_LEN:
2003				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2004					      nla_get_u32(tbp[i]) *
2005					      SKB_TRUESIZE(ETH_FRAME_LEN));
2006				break;
2007			case NDTPA_QUEUE_LENBYTES:
2008				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2009					      nla_get_u32(tbp[i]));
2010				break;
2011			case NDTPA_PROXY_QLEN:
2012				NEIGH_VAR_SET(p, PROXY_QLEN,
2013					      nla_get_u32(tbp[i]));
2014				break;
2015			case NDTPA_APP_PROBES:
2016				NEIGH_VAR_SET(p, APP_PROBES,
2017					      nla_get_u32(tbp[i]));
2018				break;
2019			case NDTPA_UCAST_PROBES:
2020				NEIGH_VAR_SET(p, UCAST_PROBES,
2021					      nla_get_u32(tbp[i]));
2022				break;
2023			case NDTPA_MCAST_PROBES:
2024				NEIGH_VAR_SET(p, MCAST_PROBES,
2025					      nla_get_u32(tbp[i]));
2026				break;
2027			case NDTPA_MCAST_REPROBES:
2028				NEIGH_VAR_SET(p, MCAST_REPROBES,
2029					      nla_get_u32(tbp[i]));
2030				break;
2031			case NDTPA_BASE_REACHABLE_TIME:
2032				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2033					      nla_get_msecs(tbp[i]));
2034				/* Update reachable_time as well; otherwise the change
2035				 * only takes effect the next time neigh_periodic_work
2036				 * recomputes it (which can be many minutes away).
2037				 */
2038				p->reachable_time =
2039					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2040				break;
2041			case NDTPA_GC_STALETIME:
2042				NEIGH_VAR_SET(p, GC_STALETIME,
2043					      nla_get_msecs(tbp[i]));
2044				break;
2045			case NDTPA_DELAY_PROBE_TIME:
2046				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2047				      nla_get_msecs(tbp[i]));
2048				break;
2049			case NDTPA_RETRANS_TIME:
2050				NEIGH_VAR_SET(p, RETRANS_TIME,
2051					      nla_get_msecs(tbp[i]));
2052				break;
2053			case NDTPA_ANYCAST_DELAY:
2054				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055					      nla_get_msecs(tbp[i]));
2056				break;
2057			case NDTPA_PROXY_DELAY:
2058				NEIGH_VAR_SET(p, PROXY_DELAY,
2059					      nla_get_msecs(tbp[i]));
2060				break;
2061			case NDTPA_LOCKTIME:
2062				NEIGH_VAR_SET(p, LOCKTIME,
2063					      nla_get_msecs(tbp[i]));
2064				break;
2065			}
2066		}
2067	}
2068
2069	err = -ENOENT;
2070	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072	    !net_eq(net, &init_net))
2073		goto errout_tbl_lock;
2074
2075	if (tb[NDTA_THRESH1])
2076		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078	if (tb[NDTA_THRESH2])
2079		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081	if (tb[NDTA_THRESH3])
2082		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084	if (tb[NDTA_GC_INTERVAL])
2085		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087	err = 0;
2088
2089errout_tbl_lock:
2090	write_unlock_bh(&tbl->lock);
2091errout:
2092	return err;
2093}
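/*
 * Usage note: the usual way to reach neightbl_set() from userspace is
 * iproute2's "ip ntable" command, which builds exactly this message type,
 * for example (interface name is only a placeholder):
 *
 *	ip ntable change name arp_cache thresh3 4096
 *	ip ntable change name arp_cache dev eth0 base_reachable_time_ms 15000
 */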
2094
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097	struct net *net = sock_net(skb->sk);
2098	int family, tidx, nidx = 0;
2099	int tbl_skip = cb->args[0];
2100	int neigh_skip = cb->args[1];
2101	struct neigh_table *tbl;
2102
2103	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106		struct neigh_parms *p;
2107
2108		tbl = neigh_tables[tidx];
2109		if (!tbl)
2110			continue;
2111
2112		if (tidx < tbl_skip || (family && tbl->family != family))
2113			continue;
2114
2115		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117				       NLM_F_MULTI) < 0)
2118			break;
2119
2120		nidx = 0;
2121		p = list_next_entry(&tbl->parms, list);
2122		list_for_each_entry_from(p, &tbl->parms_list, list) {
2123			if (!net_eq(neigh_parms_net(p), net))
2124				continue;
2125
2126			if (nidx < neigh_skip)
2127				goto next;
2128
2129			if (neightbl_fill_param_info(skb, tbl, p,
2130						     NETLINK_CB(cb->skb).portid,
2131						     cb->nlh->nlmsg_seq,
2132						     RTM_NEWNEIGHTBL,
2133						     NLM_F_MULTI) < 0)
2134				goto out;
2135		next:
2136			nidx++;
2137		}
2138
2139		neigh_skip = 0;
2140	}
2141out:
2142	cb->args[0] = tidx;
2143	cb->args[1] = nidx;
2144
2145	return skb->len;
2146}
2147
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149			   u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151	unsigned long now = jiffies;
2152	struct nda_cacheinfo ci;
2153	struct nlmsghdr *nlh;
2154	struct ndmsg *ndm;
2155
2156	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157	if (nlh == NULL)
2158		return -EMSGSIZE;
2159
2160	ndm = nlmsg_data(nlh);
2161	ndm->ndm_family	 = neigh->ops->family;
2162	ndm->ndm_pad1    = 0;
2163	ndm->ndm_pad2    = 0;
2164	ndm->ndm_flags	 = neigh->flags;
2165	ndm->ndm_type	 = neigh->type;
2166	ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169		goto nla_put_failure;
2170
2171	read_lock_bh(&neigh->lock);
2172	ndm->ndm_state	 = neigh->nud_state;
2173	if (neigh->nud_state & NUD_VALID) {
2174		char haddr[MAX_ADDR_LEN];
2175
2176		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178			read_unlock_bh(&neigh->lock);
2179			goto nla_put_failure;
2180		}
2181	}
2182
2183	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2184	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2186	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2187	read_unlock_bh(&neigh->lock);
2188
2189	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191		goto nla_put_failure;
2192
2193	nlmsg_end(skb, nlh);
2194	return 0;
2195
2196nla_put_failure:
2197	nlmsg_cancel(skb, nlh);
2198	return -EMSGSIZE;
2199}
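/*
 * The RTM_NEWNEIGH message built here is what "ip neigh show" decodes:
 * NDA_DST carries the protocol address, NDA_LLADDR the link-layer address
 * (only while the entry is NUD_VALID), NDA_CACHEINFO the used/confirmed/
 * updated ages in clock ticks, and ndm_state the NUD_* state
 * (REACHABLE, STALE, FAILED, ...).
 */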
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202			    u32 pid, u32 seq, int type, unsigned int flags,
2203			    struct neigh_table *tbl)
2204{
2205	struct nlmsghdr *nlh;
2206	struct ndmsg *ndm;
2207
2208	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209	if (nlh == NULL)
2210		return -EMSGSIZE;
2211
2212	ndm = nlmsg_data(nlh);
2213	ndm->ndm_family	 = tbl->family;
2214	ndm->ndm_pad1    = 0;
2215	ndm->ndm_pad2    = 0;
2216	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2217	ndm->ndm_type	 = RTN_UNICAST;
2218	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219	ndm->ndm_state	 = NUD_NONE;
2220
2221	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222		goto nla_put_failure;
2223
2224	nlmsg_end(skb, nlh);
2225	return 0;
2226
2227nla_put_failure:
2228	nlmsg_cancel(skb, nlh);
2229	return -EMSGSIZE;
2230}
2231
2232static void neigh_update_notify(struct neighbour *neigh)
2233{
2234	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2235	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2236}
2237
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240	struct net_device *master;
2241
2242	if (!master_idx)
2243		return false;
2244
2245	master = netdev_master_upper_dev_get(dev);
2246	if (!master || master->ifindex != master_idx)
2247		return true;
2248
2249	return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254	if (filter_idx && dev->ifindex != filter_idx)
2255		return true;
2256
2257	return false;
2258}
2259
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261			    struct netlink_callback *cb)
2262{
2263	struct net *net = sock_net(skb->sk);
2264	const struct nlmsghdr *nlh = cb->nlh;
2265	struct nlattr *tb[NDA_MAX + 1];
2266	struct neighbour *n;
2267	int rc, h, s_h = cb->args[1];
2268	int idx, s_idx = idx = cb->args[2];
2269	struct neigh_hash_table *nht;
2270	int filter_master_idx = 0, filter_idx = 0;
2271	unsigned int flags = NLM_F_MULTI;
2272	int err;
2273
2274	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275	if (!err) {
2276		if (tb[NDA_IFINDEX])
2277			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279		if (tb[NDA_MASTER])
2280			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282		if (filter_idx || filter_master_idx)
2283			flags |= NLM_F_DUMP_FILTERED;
2284	}
2285
2286	rcu_read_lock_bh();
2287	nht = rcu_dereference_bh(tbl->nht);
2288
2289	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290		if (h > s_h)
2291			s_idx = 0;
2292		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293		     n != NULL;
2294		     n = rcu_dereference_bh(n->next)) {
2295			if (!net_eq(dev_net(n->dev), net))
2296				continue;
2297			if (neigh_ifindex_filtered(n->dev, filter_idx))
2298				continue;
2299			if (neigh_master_filtered(n->dev, filter_master_idx))
2300				continue;
2301			if (idx < s_idx)
2302				goto next;
2303			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2304					    cb->nlh->nlmsg_seq,
2305					    RTM_NEWNEIGH,
2306					    flags) < 0) {
2307				rc = -1;
2308				goto out;
2309			}
2310next:
2311			idx++;
2312		}
2313	}
2314	rc = skb->len;
2315out:
2316	rcu_read_unlock_bh();
2317	cb->args[1] = h;
2318	cb->args[2] = idx;
2319	return rc;
2320}
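/*
 * Dump requests may carry NDA_IFINDEX and/or NDA_MASTER to restrict the
 * dump to one device or to the ports of one master device; replies are
 * then flagged with NLM_F_DUMP_FILTERED.  With iproute2 this corresponds
 * to commands such as "ip neigh show dev eth0" or "ip neigh show master
 * br0" (device names are placeholders).
 */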
2321
2322static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2323			     struct netlink_callback *cb)
2324{
2325	struct pneigh_entry *n;
2326	struct net *net = sock_net(skb->sk);
2327	int rc, h, s_h = cb->args[3];
2328	int idx, s_idx = idx = cb->args[4];
2329
2330	read_lock_bh(&tbl->lock);
2331
2332	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2333		if (h > s_h)
2334			s_idx = 0;
2335		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336			if (pneigh_net(n) != net)
2337				continue;
2338			if (idx < s_idx)
2339				goto next;
2340			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2341					    cb->nlh->nlmsg_seq,
2342					    RTM_NEWNEIGH,
2343					    NLM_F_MULTI, tbl) < 0) {
2344				read_unlock_bh(&tbl->lock);
2345				rc = -1;
2346				goto out;
2347			}
2348		next:
2349			idx++;
2350		}
2351	}
2352
2353	read_unlock_bh(&tbl->lock);
2354	rc = skb->len;
2355out:
2356	cb->args[3] = h;
2357	cb->args[4] = idx;
2358	return rc;
2359
2360}
2361
2362static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2363{
2364	struct neigh_table *tbl;
2365	int t, family, s_t;
2366	int proxy = 0;
2367	int err;
2368
2369	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2370
2371	/* Check whether a full ndmsg structure is present; the family
2372	 * member sits at the same offset in both rtgenmsg and ndmsg.
2373	 */
2374	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2375	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2376		proxy = 1;
2377
2378	s_t = cb->args[0];
2379
2380	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2381		tbl = neigh_tables[t];
2382
2383		if (!tbl)
2384			continue;
2385		if (t < s_t || (family && tbl->family != family))
2386			continue;
2387		if (t > s_t)
2388			memset(&cb->args[1], 0, sizeof(cb->args) -
2389						sizeof(cb->args[0]));
2390		if (proxy)
2391			err = pneigh_dump_table(tbl, skb, cb);
2392		else
2393			err = neigh_dump_table(tbl, skb, cb);
2394		if (err < 0)
2395			break;
2396	}
2397
2398	cb->args[0] = t;
2399	return skb->len;
2400}
2401
2402void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2403{
2404	int chain;
2405	struct neigh_hash_table *nht;
2406
2407	rcu_read_lock_bh();
2408	nht = rcu_dereference_bh(tbl->nht);
2409
2410	read_lock(&tbl->lock); /* avoid resizes */
2411	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2412		struct neighbour *n;
2413
2414		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2415		     n != NULL;
2416		     n = rcu_dereference_bh(n->next))
2417			cb(n, cookie);
2418	}
2419	read_unlock(&tbl->lock);
2420	rcu_read_unlock_bh();
2421}
2422EXPORT_SYMBOL(neigh_for_each);
2423
2424/* The tbl->lock must be held as a writer and BH disabled. */
2425void __neigh_for_each_release(struct neigh_table *tbl,
2426			      int (*cb)(struct neighbour *))
2427{
2428	int chain;
2429	struct neigh_hash_table *nht;
2430
2431	nht = rcu_dereference_protected(tbl->nht,
2432					lockdep_is_held(&tbl->lock));
2433	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2434		struct neighbour *n;
2435		struct neighbour __rcu **np;
2436
2437		np = &nht->hash_buckets[chain];
2438		while ((n = rcu_dereference_protected(*np,
2439					lockdep_is_held(&tbl->lock))) != NULL) {
2440			int release;
2441
2442			write_lock(&n->lock);
2443			release = cb(n);
2444			if (release) {
2445				rcu_assign_pointer(*np,
2446					rcu_dereference_protected(n->next,
2447						lockdep_is_held(&tbl->lock)));
2448				n->dead = 1;
2449			} else
2450				np = &n->next;
2451			write_unlock(&n->lock);
2452			if (release)
2453				neigh_cleanup_and_release(n);
2454		}
2455	}
2456}
2457EXPORT_SYMBOL(__neigh_for_each_release);
2458
2459int neigh_xmit(int index, struct net_device *dev,
2460	       const void *addr, struct sk_buff *skb)
2461{
2462	int err = -EAFNOSUPPORT;
2463	if (likely(index < NEIGH_NR_TABLES)) {
2464		struct neigh_table *tbl;
2465		struct neighbour *neigh;
2466
2467		tbl = neigh_tables[index];
2468		if (!tbl)
2469			goto out;
2470		neigh = __neigh_lookup_noref(tbl, addr, dev);
2471		if (!neigh)
2472			neigh = __neigh_create(tbl, addr, dev, false);
2473		err = PTR_ERR(neigh);
2474		if (IS_ERR(neigh))
2475			goto out_kfree_skb;
2476		err = neigh->output(neigh, skb);
2477	}
2478	else if (index == NEIGH_LINK_TABLE) {
2479		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2480				      addr, NULL, skb->len);
2481		if (err < 0)
2482			goto out_kfree_skb;
2483		err = dev_queue_xmit(skb);
2484	}
2485out:
2486	return err;
2487out_kfree_skb:
2488	kfree_skb(skb);
2489	goto out;
2490}
2491EXPORT_SYMBOL(neigh_xmit);
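/*
 * Minimal usage sketch for neigh_xmit(): a caller that already knows the
 * next-hop protocol address can let the neighbour layer resolve and send.
 * In this sketch next_hop is a placeholder __be32:
 *
 *	__be32 next_hop = ...;
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &next_hop, skb);
 *
 * Passing NEIGH_LINK_TABLE instead bypasses resolution and just builds the
 * link-layer header from the address supplied, as the branch above shows.
 */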
2492
2493#ifdef CONFIG_PROC_FS
2494
2495static struct neighbour *neigh_get_first(struct seq_file *seq)
2496{
2497	struct neigh_seq_state *state = seq->private;
2498	struct net *net = seq_file_net(seq);
2499	struct neigh_hash_table *nht = state->nht;
2500	struct neighbour *n = NULL;
2501	int bucket = state->bucket;
2502
2503	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2504	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2505		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2506
2507		while (n) {
2508			if (!net_eq(dev_net(n->dev), net))
2509				goto next;
2510			if (state->neigh_sub_iter) {
2511				loff_t fakep = 0;
2512				void *v;
2513
2514				v = state->neigh_sub_iter(state, n, &fakep);
2515				if (!v)
2516					goto next;
2517			}
2518			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2519				break;
2520			if (n->nud_state & ~NUD_NOARP)
2521				break;
2522next:
2523			n = rcu_dereference_bh(n->next);
2524		}
2525
2526		if (n)
2527			break;
2528	}
2529	state->bucket = bucket;
2530
2531	return n;
2532}
2533
2534static struct neighbour *neigh_get_next(struct seq_file *seq,
2535					struct neighbour *n,
2536					loff_t *pos)
2537{
2538	struct neigh_seq_state *state = seq->private;
2539	struct net *net = seq_file_net(seq);
2540	struct neigh_hash_table *nht = state->nht;
2541
2542	if (state->neigh_sub_iter) {
2543		void *v = state->neigh_sub_iter(state, n, pos);
2544		if (v)
2545			return n;
2546	}
2547	n = rcu_dereference_bh(n->next);
2548
2549	while (1) {
2550		while (n) {
2551			if (!net_eq(dev_net(n->dev), net))
2552				goto next;
2553			if (state->neigh_sub_iter) {
2554				void *v = state->neigh_sub_iter(state, n, pos);
2555				if (v)
2556					return n;
2557				goto next;
2558			}
2559			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2560				break;
2561
2562			if (n->nud_state & ~NUD_NOARP)
2563				break;
2564next:
2565			n = rcu_dereference_bh(n->next);
2566		}
2567
2568		if (n)
2569			break;
2570
2571		if (++state->bucket >= (1 << nht->hash_shift))
2572			break;
2573
2574		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2575	}
2576
2577	if (n && pos)
2578		--(*pos);
2579	return n;
2580}
2581
2582static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2583{
2584	struct neighbour *n = neigh_get_first(seq);
2585
2586	if (n) {
2587		--(*pos);
2588		while (*pos) {
2589			n = neigh_get_next(seq, n, pos);
2590			if (!n)
2591				break;
2592		}
2593	}
2594	return *pos ? NULL : n;
2595}
2596
2597static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2598{
2599	struct neigh_seq_state *state = seq->private;
2600	struct net *net = seq_file_net(seq);
2601	struct neigh_table *tbl = state->tbl;
2602	struct pneigh_entry *pn = NULL;
2603	int bucket = state->bucket;
2604
2605	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2606	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2607		pn = tbl->phash_buckets[bucket];
2608		while (pn && !net_eq(pneigh_net(pn), net))
2609			pn = pn->next;
2610		if (pn)
2611			break;
2612	}
2613	state->bucket = bucket;
2614
2615	return pn;
2616}
2617
2618static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2619					    struct pneigh_entry *pn,
2620					    loff_t *pos)
2621{
2622	struct neigh_seq_state *state = seq->private;
2623	struct net *net = seq_file_net(seq);
2624	struct neigh_table *tbl = state->tbl;
2625
2626	do {
2627		pn = pn->next;
2628	} while (pn && !net_eq(pneigh_net(pn), net));
2629
2630	while (!pn) {
2631		if (++state->bucket > PNEIGH_HASHMASK)
2632			break;
2633		pn = tbl->phash_buckets[state->bucket];
2634		while (pn && !net_eq(pneigh_net(pn), net))
2635			pn = pn->next;
2636		if (pn)
2637			break;
2638	}
2639
2640	if (pn && pos)
2641		--(*pos);
2642
2643	return pn;
2644}
2645
2646static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2647{
2648	struct pneigh_entry *pn = pneigh_get_first(seq);
2649
2650	if (pn) {
2651		--(*pos);
2652		while (*pos) {
2653			pn = pneigh_get_next(seq, pn, pos);
2654			if (!pn)
2655				break;
2656		}
2657	}
2658	return *pos ? NULL : pn;
2659}
2660
2661static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2662{
2663	struct neigh_seq_state *state = seq->private;
2664	void *rc;
2665	loff_t idxpos = *pos;
2666
2667	rc = neigh_get_idx(seq, &idxpos);
2668	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2669		rc = pneigh_get_idx(seq, &idxpos);
2670
2671	return rc;
2672}
2673
2674void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2675	__acquires(rcu_bh)
2676{
2677	struct neigh_seq_state *state = seq->private;
2678
2679	state->tbl = tbl;
2680	state->bucket = 0;
2681	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2682
2683	rcu_read_lock_bh();
2684	state->nht = rcu_dereference_bh(tbl->nht);
2685
2686	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2687}
2688EXPORT_SYMBOL(neigh_seq_start);
2689
2690void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2691{
2692	struct neigh_seq_state *state;
2693	void *rc;
2694
2695	if (v == SEQ_START_TOKEN) {
2696		rc = neigh_get_first(seq);
2697		goto out;
2698	}
2699
2700	state = seq->private;
2701	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2702		rc = neigh_get_next(seq, v, NULL);
2703		if (rc)
2704			goto out;
2705		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2706			rc = pneigh_get_first(seq);
2707	} else {
2708		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2709		rc = pneigh_get_next(seq, v, NULL);
2710	}
2711out:
2712	++(*pos);
2713	return rc;
2714}
2715EXPORT_SYMBOL(neigh_seq_next);
2716
2717void neigh_seq_stop(struct seq_file *seq, void *v)
2718	__releases(rcu_bh)
2719{
2720	rcu_read_unlock_bh();
2721}
2722EXPORT_SYMBOL(neigh_seq_stop);
2723
2724/* statistics via seq_file */
2725
2726static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2727{
2728	struct neigh_table *tbl = seq->private;
2729	int cpu;
2730
2731	if (*pos == 0)
2732		return SEQ_START_TOKEN;
2733
2734	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2735		if (!cpu_possible(cpu))
2736			continue;
2737		*pos = cpu+1;
2738		return per_cpu_ptr(tbl->stats, cpu);
2739	}
2740	return NULL;
2741}
2742
2743static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2744{
2745	struct neigh_table *tbl = seq->private;
2746	int cpu;
2747
2748	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2749		if (!cpu_possible(cpu))
2750			continue;
2751		*pos = cpu+1;
2752		return per_cpu_ptr(tbl->stats, cpu);
2753	}
2754	return NULL;
2755}
2756
2757static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2758{
2759
2760}
2761
2762static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2763{
2764	struct neigh_table *tbl = seq->private;
2765	struct neigh_statistics *st = v;
2766
2767	if (v == SEQ_START_TOKEN) {
2768		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2769		return 0;
2770	}
2771
2772	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2773			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2774		   atomic_read(&tbl->entries),
2775
2776		   st->allocs,
2777		   st->destroys,
2778		   st->hash_grows,
2779
2780		   st->lookups,
2781		   st->hits,
2782
2783		   st->res_failed,
2784
2785		   st->rcv_probes_mcast,
2786		   st->rcv_probes_ucast,
2787
2788		   st->periodic_gc_runs,
2789		   st->forced_gc_runs,
2790		   st->unres_discards,
2791		   st->table_fulls
2792		   );
2793
2794	return 0;
2795}
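/*
 * This show routine backs /proc/net/stat/<table id> (for instance
 * /proc/net/stat/arp_cache and /proc/net/stat/ndisc_cache): a header line
 * followed by one row of hexadecimal values per possible CPU.  The first
 * column repeats the current entry count; the remaining columns are
 * per-CPU event counters, so tools normally sum them across rows.
 */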
2796
2797static const struct seq_operations neigh_stat_seq_ops = {
2798	.start	= neigh_stat_seq_start,
2799	.next	= neigh_stat_seq_next,
2800	.stop	= neigh_stat_seq_stop,
2801	.show	= neigh_stat_seq_show,
2802};
2803
2804static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2805{
2806	int ret = seq_open(file, &neigh_stat_seq_ops);
2807
2808	if (!ret) {
2809		struct seq_file *sf = file->private_data;
2810		sf->private = PDE_DATA(inode);
2811	}
2812	return ret;
2813};
2814
2815static const struct file_operations neigh_stat_seq_fops = {
2816	.owner	 = THIS_MODULE,
2817	.open 	 = neigh_stat_seq_open,
2818	.read	 = seq_read,
2819	.llseek	 = seq_lseek,
2820	.release = seq_release,
2821};
2822
2823#endif /* CONFIG_PROC_FS */
2824
2825static inline size_t neigh_nlmsg_size(void)
2826{
2827	return NLMSG_ALIGN(sizeof(struct ndmsg))
2828	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2829	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2830	       + nla_total_size(sizeof(struct nda_cacheinfo))
2831	       + nla_total_size(4); /* NDA_PROBES */
2832}
2833
2834static void __neigh_notify(struct neighbour *n, int type, int flags)
2835{
2836	struct net *net = dev_net(n->dev);
2837	struct sk_buff *skb;
2838	int err = -ENOBUFS;
2839
2840	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2841	if (skb == NULL)
2842		goto errout;
2843
2844	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2845	if (err < 0) {
2846		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2847		WARN_ON(err == -EMSGSIZE);
2848		kfree_skb(skb);
2849		goto errout;
2850	}
2851	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2852	return;
2853errout:
2854	if (err < 0)
2855		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2856}
2857
2858void neigh_app_ns(struct neighbour *n)
2859{
2860	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2861}
2862EXPORT_SYMBOL(neigh_app_ns);
2863
2864#ifdef CONFIG_SYSCTL
2865static int zero;
2866static int int_max = INT_MAX;
2867static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2868
2869static int proc_unres_qlen(struct ctl_table *ctl, int write,
2870			   void __user *buffer, size_t *lenp, loff_t *ppos)
2871{
2872	int size, ret;
2873	struct ctl_table tmp = *ctl;
2874
2875	tmp.extra1 = &zero;
2876	tmp.extra2 = &unres_qlen_max;
2877	tmp.data = &size;
2878
2879	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2880	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2881
2882	if (write && !ret)
2883		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2884	return ret;
2885}
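/*
 * unres_qlen is expressed in packets but stored as bytes in
 * QUEUE_LEN_BYTES.  For example, writing 64 stores
 * 64 * SKB_TRUESIZE(ETH_FRAME_LEN) bytes, and reading divides by the same
 * factor, so unres_qlen and unres_qlen_bytes are two views of one limit.
 */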
2886
2887static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2888						   int family)
2889{
2890	switch (family) {
2891	case AF_INET:
2892		return __in_dev_arp_parms_get_rcu(dev);
2893	case AF_INET6:
2894		return __in6_dev_nd_parms_get_rcu(dev);
2895	}
2896	return NULL;
2897}
2898
2899static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2900				  int index)
2901{
2902	struct net_device *dev;
2903	int family = neigh_parms_family(p);
2904
2905	rcu_read_lock();
2906	for_each_netdev_rcu(net, dev) {
2907		struct neigh_parms *dst_p =
2908				neigh_get_dev_parms_rcu(dev, family);
2909
2910		if (dst_p && !test_bit(index, dst_p->data_state))
2911			dst_p->data[index] = p->data[index];
2912	}
2913	rcu_read_unlock();
2914}
2915
2916static void neigh_proc_update(struct ctl_table *ctl, int write)
2917{
2918	struct net_device *dev = ctl->extra1;
2919	struct neigh_parms *p = ctl->extra2;
2920	struct net *net = neigh_parms_net(p);
2921	int index = (int *) ctl->data - p->data;
2922
2923	if (!write)
2924		return;
2925
2926	set_bit(index, p->data_state);
2927	if (!dev) /* NULL dev means this is default value */
2928		neigh_copy_dflt_parms(net, p, index);
2929}
2930
2931static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2932					   void __user *buffer,
2933					   size_t *lenp, loff_t *ppos)
2934{
2935	struct ctl_table tmp = *ctl;
2936	int ret;
2937
2938	tmp.extra1 = &zero;
2939	tmp.extra2 = &int_max;
2940
2941	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2942	neigh_proc_update(ctl, write);
2943	return ret;
2944}
2945
2946int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2947			void __user *buffer, size_t *lenp, loff_t *ppos)
2948{
2949	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2950
2951	neigh_proc_update(ctl, write);
2952	return ret;
2953}
2954EXPORT_SYMBOL(neigh_proc_dointvec);
2955
2956int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2957				void __user *buffer,
2958				size_t *lenp, loff_t *ppos)
2959{
2960	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2961
2962	neigh_proc_update(ctl, write);
2963	return ret;
2964}
2965EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2966
2967static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2968					      void __user *buffer,
2969					      size_t *lenp, loff_t *ppos)
2970{
2971	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2972
2973	neigh_proc_update(ctl, write);
2974	return ret;
2975}
2976
2977int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2978				   void __user *buffer,
2979				   size_t *lenp, loff_t *ppos)
2980{
2981	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2982
2983	neigh_proc_update(ctl, write);
2984	return ret;
2985}
2986EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2987
2988static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2989					  void __user *buffer,
2990					  size_t *lenp, loff_t *ppos)
2991{
2992	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2993
2994	neigh_proc_update(ctl, write);
2995	return ret;
2996}
2997
2998static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
2999					  void __user *buffer,
3000					  size_t *lenp, loff_t *ppos)
3001{
3002	struct neigh_parms *p = ctl->extra2;
3003	int ret;
3004
3005	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3006		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3007	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3008		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3009	else
3010		ret = -1;
3011
3012	if (write && ret == 0) {
3013		/* Update reachable_time as well; otherwise the change only
3014		 * takes effect the next time neigh_periodic_work decides to
3015		 * recompute it.
3016		 */
3017		p->reachable_time =
3018			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3019	}
3020	return ret;
3021}
3022
3023#define NEIGH_PARMS_DATA_OFFSET(index)	\
3024	(&((struct neigh_parms *) 0)->data[index])
3025
3026#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3027	[NEIGH_VAR_ ## attr] = { \
3028		.procname	= name, \
3029		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3030		.maxlen		= sizeof(int), \
3031		.mode		= mval, \
3032		.proc_handler	= proc, \
3033	}
3034
3035#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3036	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3037
3038#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3039	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3040
3041#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3042	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3043
3044#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3045	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3046
3047#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3048	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3049
3050#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3051	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
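/*
 * For reference, one expansion of the helpers above:
 *
 *	NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit")
 *
 * becomes roughly
 *
 *	[NEIGH_VAR_MCAST_PROBES] = {
 *		.procname	= "mcast_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_MCAST_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	}
 *
 * The .data offset is rebased onto a real struct neigh_parms by the
 * "t->neigh_vars[i].data += (long) p" loop in neigh_sysctl_register().
 */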
3052
3053static struct neigh_sysctl_table {
3054	struct ctl_table_header *sysctl_header;
3055	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3056} neigh_sysctl_template __read_mostly = {
3057	.neigh_vars = {
3058		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3059		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3060		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3061		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3062		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3063		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3064		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3065		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3066		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3067		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3068		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3069		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3070		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3071		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3072		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3073		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3074		[NEIGH_VAR_GC_INTERVAL] = {
3075			.procname	= "gc_interval",
3076			.maxlen		= sizeof(int),
3077			.mode		= 0644,
3078			.proc_handler	= proc_dointvec_jiffies,
3079		},
3080		[NEIGH_VAR_GC_THRESH1] = {
3081			.procname	= "gc_thresh1",
3082			.maxlen		= sizeof(int),
3083			.mode		= 0644,
3084			.extra1 	= &zero,
3085			.extra2		= &int_max,
3086			.proc_handler	= proc_dointvec_minmax,
3087		},
3088		[NEIGH_VAR_GC_THRESH2] = {
3089			.procname	= "gc_thresh2",
3090			.maxlen		= sizeof(int),
3091			.mode		= 0644,
3092			.extra1 	= &zero,
3093			.extra2		= &int_max,
3094			.proc_handler	= proc_dointvec_minmax,
3095		},
3096		[NEIGH_VAR_GC_THRESH3] = {
3097			.procname	= "gc_thresh3",
3098			.maxlen		= sizeof(int),
3099			.mode		= 0644,
3100			.extra1 	= &zero,
3101			.extra2		= &int_max,
3102			.proc_handler	= proc_dointvec_minmax,
3103		},
3104		{},
3105	},
3106};
3107
3108int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3109			  proc_handler *handler)
3110{
3111	int i;
3112	struct neigh_sysctl_table *t;
3113	const char *dev_name_source;
3114	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3115	char *p_name;
3116
3117	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3118	if (!t)
3119		goto err;
3120
3121	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3122		t->neigh_vars[i].data += (long) p;
3123		t->neigh_vars[i].extra1 = dev;
3124		t->neigh_vars[i].extra2 = p;
3125	}
3126
3127	if (dev) {
3128		dev_name_source = dev->name;
3129		/* Terminate the table early */
3130		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3131		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3132	} else {
3133		struct neigh_table *tbl = p->tbl;
3134		dev_name_source = "default";
3135		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3136		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3137		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3138		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3139	}
3140
3141	if (handler) {
3142		/* RetransTime */
3143		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3144		/* ReachableTime */
3145		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3146		/* RetransTime (in milliseconds)*/
3147		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3148		/* ReachableTime (in milliseconds) */
3149		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3150	} else {
3151		/* These handlers update p->reachable_time after
3152		 * base_reachable_time(_ms) is set, so the new interval is applied
3153		 * from the next neighbour update onwards instead of waiting for
3154		 * neigh_periodic_work to recompute it (which can take many minutes).
3155		 * Any handler that replaces them should do the same.
3156		 */
3157		/* ReachableTime */
3158		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3159			neigh_proc_base_reachable_time;
3160		/* ReachableTime (in milliseconds) */
3161		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3162			neigh_proc_base_reachable_time;
3163	}
3164
3165	/* Don't export sysctls to unprivileged users */
3166	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3167		t->neigh_vars[0].procname = NULL;
3168
3169	switch (neigh_parms_family(p)) {
3170	case AF_INET:
3171	      p_name = "ipv4";
3172	      break;
3173	case AF_INET6:
3174	      p_name = "ipv6";
3175	      break;
3176	default:
3177	      BUG();
3178	}
3179
3180	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3181		p_name, dev_name_source);
3182	t->sysctl_header =
3183		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3184	if (!t->sysctl_header)
3185		goto free;
3186
3187	p->sysctl_table = t;
3188	return 0;
3189
3190free:
3191	kfree(t);
3192err:
3193	return -ENOBUFS;
3194}
3195EXPORT_SYMBOL(neigh_sysctl_register);
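/*
 * The resulting sysctl paths are net/<ipv4|ipv6>/neigh/<ifname>/<var> for
 * per-device parameters and net/<ipv4|ipv6>/neigh/default/<var> for the
 * table defaults, e.g. /proc/sys/net/ipv4/neigh/eth0/ucast_solicit
 * ("eth0" here is only a placeholder interface name).
 */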
3196
3197void neigh_sysctl_unregister(struct neigh_parms *p)
3198{
3199	if (p->sysctl_table) {
3200		struct neigh_sysctl_table *t = p->sysctl_table;
3201		p->sysctl_table = NULL;
3202		unregister_net_sysctl_table(t->sysctl_header);
3203		kfree(t);
3204	}
3205}
3206EXPORT_SYMBOL(neigh_sysctl_unregister);
3207
3208#endif	/* CONFIG_SYSCTL */
3209
3210static int __init neigh_init(void)
3211{
3212	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3213	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3214	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3215
3216	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3217		      NULL);
3218	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3219
3220	return 0;
3221}
3222
3223subsys_initcall(neigh_init);
3224
v4.10.11
   1/*
   2 *	Generic address resolution entity
   3 *
   4 *	Authors:
   5 *	Pedro Roque		<roque@di.fc.ul.pt>
   6 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 *
  13 *	Fixes:
  14 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  15 *	Harald Welte		Add neighbour cache statistics like rtstat
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/socket.h>
  25#include <linux/netdevice.h>
  26#include <linux/proc_fs.h>
  27#ifdef CONFIG_SYSCTL
  28#include <linux/sysctl.h>
  29#endif
  30#include <linux/times.h>
  31#include <net/net_namespace.h>
  32#include <net/neighbour.h>
  33#include <net/dst.h>
  34#include <net/sock.h>
  35#include <net/netevent.h>
  36#include <net/netlink.h>
  37#include <linux/rtnetlink.h>
  38#include <linux/random.h>
  39#include <linux/string.h>
  40#include <linux/log2.h>
  41#include <linux/inetdevice.h>
  42#include <net/addrconf.h>
  43
  44#define DEBUG
  45#define NEIGH_DEBUG 1
  46#define neigh_dbg(level, fmt, ...)		\
  47do {						\
  48	if (level <= NEIGH_DEBUG)		\
  49		pr_debug(fmt, ##__VA_ARGS__);	\
  50} while (0)
  51
  52#define PNEIGH_HASHMASK		0xF
  53
  54static void neigh_timer_handler(unsigned long arg);
  55static void __neigh_notify(struct neighbour *n, int type, int flags);
  56static void neigh_update_notify(struct neighbour *neigh);
  57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
  58
  59#ifdef CONFIG_PROC_FS
  60static const struct file_operations neigh_stat_seq_fops;
  61#endif
  62
  63/*
  64   Neighbour hash table buckets are protected with rwlock tbl->lock.
  65
  66   - All the scans/updates to hash buckets MUST be made under this lock.
  67   - NOTHING clever should be made under this lock: no callbacks
  68     to protocol backends, no attempts to send something to network.
  69     It will result in deadlocks, if backend/driver wants to use neighbour
  70     cache.
  71   - If the entry requires some non-trivial actions, increase
  72     its reference count and release table lock.
  73
  74   Neighbour entries are protected:
  75   - with reference count.
  76   - with rwlock neigh->lock
  77
  78   Reference count prevents destruction.
  79
  80   neigh->lock mainly serializes ll address data and its validity state.
  81   However, the same lock is used to protect another entry fields:
  82    - timer
  83    - resolution queue
  84
  85   Again, nothing clever shall be made under neigh->lock,
  86   the most complicated procedure, which we allow is dev->hard_header.
  87   It is supposed, that dev->hard_header is simplistic and does
  88   not make callbacks to neighbour tables.
  89 */
  90
  91static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  92{
  93	kfree_skb(skb);
  94	return -ENETDOWN;
  95}
  96
  97static void neigh_cleanup_and_release(struct neighbour *neigh)
  98{
  99	if (neigh->parms->neigh_cleanup)
 100		neigh->parms->neigh_cleanup(neigh);
 101
 102	__neigh_notify(neigh, RTM_DELNEIGH, 0);
 103	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 104	neigh_release(neigh);
 105}
 106
 107/*
 108 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 109 * It corresponds to the default IPv6 settings and is not overridable,
 110 * because it is a really reasonable choice.
 111 */
 112
 113unsigned long neigh_rand_reach_time(unsigned long base)
 114{
 115	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 116}
 117EXPORT_SYMBOL(neigh_rand_reach_time);
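/*
 * Example: with base_reachable_time at its usual 30 * HZ, the value
 * returned here is uniformly distributed over [15 * HZ, 45 * HZ), i.e.
 * roughly 15..45 seconds, which is what p->reachable_time is set to.
 */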
 118
 119
 120static int neigh_forced_gc(struct neigh_table *tbl)
 121{
 122	int shrunk = 0;
 123	int i;
 124	struct neigh_hash_table *nht;
 125
 126	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 127
 128	write_lock_bh(&tbl->lock);
 129	nht = rcu_dereference_protected(tbl->nht,
 130					lockdep_is_held(&tbl->lock));
 131	for (i = 0; i < (1 << nht->hash_shift); i++) {
 132		struct neighbour *n;
 133		struct neighbour __rcu **np;
 134
 135		np = &nht->hash_buckets[i];
 136		while ((n = rcu_dereference_protected(*np,
 137					lockdep_is_held(&tbl->lock))) != NULL) {
 138			/* Neighbour record may be discarded if:
 139			 * - nobody refers to it.
 140			 * - it is not permanent
 141			 */
 142			write_lock(&n->lock);
 143			if (atomic_read(&n->refcnt) == 1 &&
 144			    !(n->nud_state & NUD_PERMANENT)) {
 145				rcu_assign_pointer(*np,
 146					rcu_dereference_protected(n->next,
 147						  lockdep_is_held(&tbl->lock)));
 148				n->dead = 1;
 149				shrunk	= 1;
 150				write_unlock(&n->lock);
 151				neigh_cleanup_and_release(n);
 152				continue;
 153			}
 154			write_unlock(&n->lock);
 155			np = &n->next;
 156		}
 157	}
 158
 159	tbl->last_flush = jiffies;
 160
 161	write_unlock_bh(&tbl->lock);
 162
 163	return shrunk;
 164}
 165
 166static void neigh_add_timer(struct neighbour *n, unsigned long when)
 167{
 168	neigh_hold(n);
 169	if (unlikely(mod_timer(&n->timer, when))) {
 170		printk("NEIGH: BUG, double timer add, state is %x\n",
 171		       n->nud_state);
 172		dump_stack();
 173	}
 174}
 175
 176static int neigh_del_timer(struct neighbour *n)
 177{
 178	if ((n->nud_state & NUD_IN_TIMER) &&
 179	    del_timer(&n->timer)) {
 180		neigh_release(n);
 181		return 1;
 182	}
 183	return 0;
 184}
 185
 186static void pneigh_queue_purge(struct sk_buff_head *list)
 187{
 188	struct sk_buff *skb;
 189
 190	while ((skb = skb_dequeue(list)) != NULL) {
 191		dev_put(skb->dev);
 192		kfree_skb(skb);
 193	}
 194}
 195
 196static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 197{
 198	int i;
 199	struct neigh_hash_table *nht;
 200
 201	nht = rcu_dereference_protected(tbl->nht,
 202					lockdep_is_held(&tbl->lock));
 203
 204	for (i = 0; i < (1 << nht->hash_shift); i++) {
 205		struct neighbour *n;
 206		struct neighbour __rcu **np = &nht->hash_buckets[i];
 207
 208		while ((n = rcu_dereference_protected(*np,
 209					lockdep_is_held(&tbl->lock))) != NULL) {
 210			if (dev && n->dev != dev) {
 211				np = &n->next;
 212				continue;
 213			}
 214			rcu_assign_pointer(*np,
 215				   rcu_dereference_protected(n->next,
 216						lockdep_is_held(&tbl->lock)));
 217			write_lock(&n->lock);
 218			neigh_del_timer(n);
 219			n->dead = 1;
 220
 221			if (atomic_read(&n->refcnt) != 1) {
 222				/* The most unpleasant situation:
 223				   we must destroy the neighbour entry,
 224				   but someone still uses it.
 225
 226				   Destruction will be delayed until
 227				   the last user releases it, but we
 228				   must kill timers etc. and move it
 229				   to a safe state.
 230				 */
 231				__skb_queue_purge(&n->arp_queue);
 232				n->arp_queue_len_bytes = 0;
 233				n->output = neigh_blackhole;
 234				if (n->nud_state & NUD_VALID)
 235					n->nud_state = NUD_NOARP;
 236				else
 237					n->nud_state = NUD_NONE;
 238				neigh_dbg(2, "neigh %p is stray\n", n);
 239			}
 240			write_unlock(&n->lock);
 241			neigh_cleanup_and_release(n);
 242		}
 243	}
 244}
 245
 246void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 247{
 248	write_lock_bh(&tbl->lock);
 249	neigh_flush_dev(tbl, dev);
 250	write_unlock_bh(&tbl->lock);
 251}
 252EXPORT_SYMBOL(neigh_changeaddr);
 253
 254int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 255{
 256	write_lock_bh(&tbl->lock);
 257	neigh_flush_dev(tbl, dev);
 258	pneigh_ifdown(tbl, dev);
 259	write_unlock_bh(&tbl->lock);
 260
 261	del_timer_sync(&tbl->proxy_timer);
 262	pneigh_queue_purge(&tbl->proxy_queue);
 263	return 0;
 264}
 265EXPORT_SYMBOL(neigh_ifdown);
 266
 267static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 268{
 269	struct neighbour *n = NULL;
 270	unsigned long now = jiffies;
 271	int entries;
 272
 273	entries = atomic_inc_return(&tbl->entries) - 1;
 274	if (entries >= tbl->gc_thresh3 ||
 275	    (entries >= tbl->gc_thresh2 &&
 276	     time_after(now, tbl->last_flush + 5 * HZ))) {
 277		if (!neigh_forced_gc(tbl) &&
 278		    entries >= tbl->gc_thresh3) {
 279			net_info_ratelimited("%s: neighbor table overflow!\n",
 280					     tbl->id);
 281			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 282			goto out_entries;
 283		}
 284	}
 285
 286	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 287	if (!n)
 288		goto out_entries;
 289
 290	__skb_queue_head_init(&n->arp_queue);
 291	rwlock_init(&n->lock);
 292	seqlock_init(&n->ha_lock);
 293	n->updated	  = n->used = now;
 294	n->nud_state	  = NUD_NONE;
 295	n->output	  = neigh_blackhole;
 296	seqlock_init(&n->hh.hh_lock);
 297	n->parms	  = neigh_parms_clone(&tbl->parms);
 298	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
 299
 300	NEIGH_CACHE_STAT_INC(tbl, allocs);
 301	n->tbl		  = tbl;
 302	atomic_set(&n->refcnt, 1);
 303	n->dead		  = 1;
 304out:
 305	return n;
 306
 307out_entries:
 308	atomic_dec(&tbl->entries);
 309	goto out;
 310}
 311
 312static void neigh_get_hash_rnd(u32 *x)
 313{
 314	get_random_bytes(x, sizeof(*x));
 315	*x |= 1;
 316}
 317
 318static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 319{
 320	size_t size = (1 << shift) * sizeof(struct neighbour *);
 321	struct neigh_hash_table *ret;
 322	struct neighbour __rcu **buckets;
 323	int i;
 324
 325	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 326	if (!ret)
 327		return NULL;
 328	if (size <= PAGE_SIZE)
 329		buckets = kzalloc(size, GFP_ATOMIC);
 330	else
 331		buckets = (struct neighbour __rcu **)
 332			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 333					   get_order(size));
 334	if (!buckets) {
 335		kfree(ret);
 336		return NULL;
 337	}
 338	ret->hash_buckets = buckets;
 339	ret->hash_shift = shift;
 340	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 341		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 342	return ret;
 343}
 344
 345static void neigh_hash_free_rcu(struct rcu_head *head)
 346{
 347	struct neigh_hash_table *nht = container_of(head,
 348						    struct neigh_hash_table,
 349						    rcu);
 350	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 351	struct neighbour __rcu **buckets = nht->hash_buckets;
 352
 353	if (size <= PAGE_SIZE)
 354		kfree(buckets);
 355	else
 356		free_pages((unsigned long)buckets, get_order(size));
 357	kfree(nht);
 358}
 359
 360static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 361						unsigned long new_shift)
 362{
 363	unsigned int i, hash;
 364	struct neigh_hash_table *new_nht, *old_nht;
 365
 366	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 367
 368	old_nht = rcu_dereference_protected(tbl->nht,
 369					    lockdep_is_held(&tbl->lock));
 370	new_nht = neigh_hash_alloc(new_shift);
 371	if (!new_nht)
 372		return old_nht;
 373
 374	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 375		struct neighbour *n, *next;
 376
 377		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 378						   lockdep_is_held(&tbl->lock));
 379		     n != NULL;
 380		     n = next) {
 381			hash = tbl->hash(n->primary_key, n->dev,
 382					 new_nht->hash_rnd);
 383
 384			hash >>= (32 - new_nht->hash_shift);
 385			next = rcu_dereference_protected(n->next,
 386						lockdep_is_held(&tbl->lock));
 387
 388			rcu_assign_pointer(n->next,
 389					   rcu_dereference_protected(
 390						new_nht->hash_buckets[hash],
 391						lockdep_is_held(&tbl->lock)));
 392			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 393		}
 394	}
 395
 396	rcu_assign_pointer(tbl->nht, new_nht);
 397	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 398	return new_nht;
 399}
 400
 401struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 402			       struct net_device *dev)
 403{
 404	struct neighbour *n;
 405
 406	NEIGH_CACHE_STAT_INC(tbl, lookups);
 407
 408	rcu_read_lock_bh();
 409	n = __neigh_lookup_noref(tbl, pkey, dev);
 410	if (n) {
 411		if (!atomic_inc_not_zero(&n->refcnt))
 412			n = NULL;
 413		NEIGH_CACHE_STAT_INC(tbl, hits);
 414	}
 415
 416	rcu_read_unlock_bh();
 417	return n;
 418}
 419EXPORT_SYMBOL(neigh_lookup);
 420
 421struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 422				     const void *pkey)
 423{
 424	struct neighbour *n;
 425	int key_len = tbl->key_len;
 426	u32 hash_val;
 427	struct neigh_hash_table *nht;
 428
 429	NEIGH_CACHE_STAT_INC(tbl, lookups);
 430
 431	rcu_read_lock_bh();
 432	nht = rcu_dereference_bh(tbl->nht);
 433	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 434
 435	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 436	     n != NULL;
 437	     n = rcu_dereference_bh(n->next)) {
 438		if (!memcmp(n->primary_key, pkey, key_len) &&
 439		    net_eq(dev_net(n->dev), net)) {
 440			if (!atomic_inc_not_zero(&n->refcnt))
 441				n = NULL;
 442			NEIGH_CACHE_STAT_INC(tbl, hits);
 443			break;
 444		}
 445	}
 446
 447	rcu_read_unlock_bh();
 448	return n;
 449}
 450EXPORT_SYMBOL(neigh_lookup_nodev);
 451
 452struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 453				 struct net_device *dev, bool want_ref)
 454{
 455	u32 hash_val;
 456	int key_len = tbl->key_len;
 457	int error;
 458	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
 459	struct neigh_hash_table *nht;
 460
 461	if (!n) {
 462		rc = ERR_PTR(-ENOBUFS);
 463		goto out;
 464	}
 465
 466	memcpy(n->primary_key, pkey, key_len);
 467	n->dev = dev;
 468	dev_hold(dev);
 469
 470	/* Protocol specific setup. */
 471	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 472		rc = ERR_PTR(error);
 473		goto out_neigh_release;
 474	}
 475
 476	if (dev->netdev_ops->ndo_neigh_construct) {
 477		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 478		if (error < 0) {
 479			rc = ERR_PTR(error);
 480			goto out_neigh_release;
 481		}
 482	}
 483
 484	/* Device specific setup. */
 485	if (n->parms->neigh_setup &&
 486	    (error = n->parms->neigh_setup(n)) < 0) {
 487		rc = ERR_PTR(error);
 488		goto out_neigh_release;
 489	}
 490
 491	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 492
 493	write_lock_bh(&tbl->lock);
 494	nht = rcu_dereference_protected(tbl->nht,
 495					lockdep_is_held(&tbl->lock));
 496
 497	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 498		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 499
 500	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 501
 502	if (n->parms->dead) {
 503		rc = ERR_PTR(-EINVAL);
 504		goto out_tbl_unlock;
 505	}
 506
 507	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 508					    lockdep_is_held(&tbl->lock));
 509	     n1 != NULL;
 510	     n1 = rcu_dereference_protected(n1->next,
 511			lockdep_is_held(&tbl->lock))) {
 512		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 513			if (want_ref)
 514				neigh_hold(n1);
 515			rc = n1;
 516			goto out_tbl_unlock;
 517		}
 518	}
 519
 520	n->dead = 0;
 521	if (want_ref)
 522		neigh_hold(n);
 523	rcu_assign_pointer(n->next,
 524			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 525						     lockdep_is_held(&tbl->lock)));
 526	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 527	write_unlock_bh(&tbl->lock);
 528	neigh_dbg(2, "neigh %p is created\n", n);
 529	rc = n;
 530out:
 531	return rc;
 532out_tbl_unlock:
 533	write_unlock_bh(&tbl->lock);
 534out_neigh_release:
 535	neigh_release(n);
 536	goto out;
 537}
 538EXPORT_SYMBOL(__neigh_create);
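/*
 * want_ref selects whether a reference is taken on behalf of the caller:
 * the neigh_create() wrapper in <net/neighbour.h> passes true, while
 * callers that only use the entry immediately under their own protection,
 * such as neigh_xmit(), pass false.
 */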
 539
 540static u32 pneigh_hash(const void *pkey, int key_len)
 541{
 542	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 543	hash_val ^= (hash_val >> 16);
 544	hash_val ^= hash_val >> 8;
 545	hash_val ^= hash_val >> 4;
 546	hash_val &= PNEIGH_HASHMASK;
 547	return hash_val;
 548}
 549
 550static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 551					      struct net *net,
 552					      const void *pkey,
 553					      int key_len,
 554					      struct net_device *dev)
 555{
 556	while (n) {
 557		if (!memcmp(n->key, pkey, key_len) &&
 558		    net_eq(pneigh_net(n), net) &&
 559		    (n->dev == dev || !n->dev))
 560			return n;
 561		n = n->next;
 562	}
 563	return NULL;
 564}
 565
 566struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 567		struct net *net, const void *pkey, struct net_device *dev)
 568{
 569	int key_len = tbl->key_len;
 570	u32 hash_val = pneigh_hash(pkey, key_len);
 571
 572	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 573				 net, pkey, key_len, dev);
 574}
 575EXPORT_SYMBOL_GPL(__pneigh_lookup);
 576
 577struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 578				    struct net *net, const void *pkey,
 579				    struct net_device *dev, int creat)
 580{
 581	struct pneigh_entry *n;
 582	int key_len = tbl->key_len;
 583	u32 hash_val = pneigh_hash(pkey, key_len);
 584
 585	read_lock_bh(&tbl->lock);
 586	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 587			      net, pkey, key_len, dev);
 588	read_unlock_bh(&tbl->lock);
 589
 590	if (n || !creat)
 591		goto out;
 592
 593	ASSERT_RTNL();
 594
 595	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 596	if (!n)
 597		goto out;
 598
 599	write_pnet(&n->net, net);
 600	memcpy(n->key, pkey, key_len);
 601	n->dev = dev;
 602	if (dev)
 603		dev_hold(dev);
 604
 605	if (tbl->pconstructor && tbl->pconstructor(n)) {
 606		if (dev)
 607			dev_put(dev);
 608		kfree(n);
 609		n = NULL;
 610		goto out;
 611	}
 612
 613	write_lock_bh(&tbl->lock);
 614	n->next = tbl->phash_buckets[hash_val];
 615	tbl->phash_buckets[hash_val] = n;
 616	write_unlock_bh(&tbl->lock);
 617out:
 618	return n;
 619}
 620EXPORT_SYMBOL(pneigh_lookup);
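/*
 * Proxy entries are normally created from userspace, e.g. with
 * "ip neigh add proxy 2001:db8::1 dev eth0" (address and device name are
 * placeholders); the RTM_NEWNEIGH handler then calls pneigh_lookup() with
 * creat = 1, which is why it may sleep and asserts the RTNL above.
 */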
 621
 622
 623int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 624		  struct net_device *dev)
 625{
 626	struct pneigh_entry *n, **np;
 627	int key_len = tbl->key_len;
 628	u32 hash_val = pneigh_hash(pkey, key_len);
 629
 630	write_lock_bh(&tbl->lock);
 631	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 632	     np = &n->next) {
 633		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 634		    net_eq(pneigh_net(n), net)) {
 635			*np = n->next;
 636			write_unlock_bh(&tbl->lock);
 637			if (tbl->pdestructor)
 638				tbl->pdestructor(n);
 639			if (n->dev)
 640				dev_put(n->dev);
 641			kfree(n);
 642			return 0;
 643		}
 644	}
 645	write_unlock_bh(&tbl->lock);
 646	return -ENOENT;
 647}
 648
 649static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 650{
 651	struct pneigh_entry *n, **np;
 652	u32 h;
 653
 654	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 655		np = &tbl->phash_buckets[h];
 656		while ((n = *np) != NULL) {
 657			if (!dev || n->dev == dev) {
 658				*np = n->next;
 659				if (tbl->pdestructor)
 660					tbl->pdestructor(n);
 661				if (n->dev)
 662					dev_put(n->dev);
 663				kfree(n);
 664				continue;
 665			}
 666			np = &n->next;
 667		}
 668	}
 669	return -ENOENT;
 670}
 671
 672static void neigh_parms_destroy(struct neigh_parms *parms);
 673
 674static inline void neigh_parms_put(struct neigh_parms *parms)
 675{
 676	if (atomic_dec_and_test(&parms->refcnt))
 677		neigh_parms_destroy(parms);
 678}
 679
 680/*
 681 *	neighbour must already be out of the table;
 682 *
 683 */
 684void neigh_destroy(struct neighbour *neigh)
 685{
 686	struct net_device *dev = neigh->dev;
 687
 688	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 689
 690	if (!neigh->dead) {
 691		pr_warn("Destroying alive neighbour %p\n", neigh);
 692		dump_stack();
 693		return;
 694	}
 695
 696	if (neigh_del_timer(neigh))
 697		pr_warn("Impossible event\n");
 698
 699	write_lock_bh(&neigh->lock);
 700	__skb_queue_purge(&neigh->arp_queue);
 701	write_unlock_bh(&neigh->lock);
 702	neigh->arp_queue_len_bytes = 0;
 703
 704	if (dev->netdev_ops->ndo_neigh_destroy)
 705		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 706
 707	dev_put(dev);
 708	neigh_parms_put(neigh->parms);
 709
 710	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 711
 712	atomic_dec(&neigh->tbl->entries);
 713	kfree_rcu(neigh, rcu);
 714}
 715EXPORT_SYMBOL(neigh_destroy);
 716
 717/* Neighbour state is suspicious;
 718   disable fast path.
 719
 720   Called with write_locked neigh.
 721 */
 722static void neigh_suspect(struct neighbour *neigh)
 723{
 724	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 725
 726	neigh->output = neigh->ops->output;
 727}
 728
 729/* Neighbour state is OK;
 730   enable fast path.
 731
 732   Called with write_locked neigh.
 733 */
 734static void neigh_connect(struct neighbour *neigh)
 735{
 736	neigh_dbg(2, "neigh %p is connected\n", neigh);
 737
 738	neigh->output = neigh->ops->connected_output;
 739}
 740
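/*
 * Illustrative note: the two helpers above implement the fast-path/slow-path
 * switch by swapping the per-neighbour output hook.  For IPv4 ARP the ops
 * typically wire these to the generic helpers defined later in this file,
 * roughly:
 *
 *	.output           = neigh_resolve_output    (slow: may (re)resolve)
 *	.connected_output = neigh_connected_output  (fast: copy cached lladdr)
 *
 * so neigh_suspect() falls back to the resolving path and neigh_connect()
 * restores the cheap header-building one.
 */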
 741static void neigh_periodic_work(struct work_struct *work)
 742{
 743	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 744	struct neighbour *n;
 745	struct neighbour __rcu **np;
 746	unsigned int i;
 747	struct neigh_hash_table *nht;
 748
 749	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 750
 751	write_lock_bh(&tbl->lock);
 752	nht = rcu_dereference_protected(tbl->nht,
 753					lockdep_is_held(&tbl->lock));
 754
 755	/*
 756	 *	periodically recompute ReachableTime from random function
 757	 */
 758
 759	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 760		struct neigh_parms *p;
 761		tbl->last_rand = jiffies;
 762		list_for_each_entry(p, &tbl->parms_list, list)
 763			p->reachable_time =
 764				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 765	}
 766
 767	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 768		goto out;
 769
 770	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 771		np = &nht->hash_buckets[i];
 772
 773		while ((n = rcu_dereference_protected(*np,
 774				lockdep_is_held(&tbl->lock))) != NULL) {
 775			unsigned int state;
 776
 777			write_lock(&n->lock);
 778
 779			state = n->nud_state;
 780			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
 781				write_unlock(&n->lock);
 782				goto next_elt;
 783			}
 784
 785			if (time_before(n->used, n->confirmed))
 786				n->used = n->confirmed;
 787
 788			if (atomic_read(&n->refcnt) == 1 &&
 789			    (state == NUD_FAILED ||
 790			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 791				*np = n->next;
 792				n->dead = 1;
 793				write_unlock(&n->lock);
 794				neigh_cleanup_and_release(n);
 795				continue;
 796			}
 797			write_unlock(&n->lock);
 798
 799next_elt:
 800			np = &n->next;
 801		}
 802		/*
 803		 * It's fine to release lock here, even if hash table
 804		 * grows while we are preempted.
 805		 */
 806		write_unlock_bh(&tbl->lock);
 807		cond_resched();
 808		write_lock_bh(&tbl->lock);
 809		nht = rcu_dereference_protected(tbl->nht,
 810						lockdep_is_held(&tbl->lock));
 811	}
 812out:
 813	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 814	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 815	 * BASE_REACHABLE_TIME.
 816	 */
 817	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 818			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 819	write_unlock_bh(&tbl->lock);
 820}
 821
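/*
 * Worked example (assuming a base_reachable_time of 30 seconds, the common
 * ARP default): the work item above then re-arms itself every 15 seconds,
 * reachable_time is re-randomized at most once per 300 seconds, and each
 * recomputed value falls uniformly in [15 s, 45 s], i.e. [base/2, 3*base/2].
 */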
 822static __inline__ int neigh_max_probes(struct neighbour *n)
 823{
 824	struct neigh_parms *p = n->parms;
 825	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 826	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 827	        NEIGH_VAR(p, MCAST_PROBES));
 828}
 829
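/*
 * Worked example (assuming the common defaults ucast_solicit = 3,
 * app_solicit = 0, mcast_solicit = 3, mcast_resolicit = 0): an entry
 * resolving in NUD_INCOMPLETE may send 3 + 0 + 3 = 6 probes, while an entry
 * being re-verified in NUD_PROBE gets only 3 + 0 + 0 = 3 before the timer
 * handler below marks it NUD_FAILED.
 */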
 830static void neigh_invalidate(struct neighbour *neigh)
 831	__releases(neigh->lock)
 832	__acquires(neigh->lock)
 833{
 834	struct sk_buff *skb;
 835
 836	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 837	neigh_dbg(2, "neigh %p is failed\n", neigh);
 838	neigh->updated = jiffies;
 839
 840	/* This is a very delicate place: report_unreachable is a very
 841	   complicated routine and can, in particular, hit the same neighbour entry!
 842
 843	   So we try to be careful and avoid a dead loop. --ANK
 844	 */
 845	while (neigh->nud_state == NUD_FAILED &&
 846	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 847		write_unlock(&neigh->lock);
 848		neigh->ops->error_report(neigh, skb);
 849		write_lock(&neigh->lock);
 850	}
 851	__skb_queue_purge(&neigh->arp_queue);
 852	neigh->arp_queue_len_bytes = 0;
 853}
 854
 855static void neigh_probe(struct neighbour *neigh)
 856	__releases(neigh->lock)
 857{
 858	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 859	/* keep skb alive even if arp_queue overflows */
 860	if (skb)
 861		skb = skb_clone(skb, GFP_ATOMIC);
 862	write_unlock(&neigh->lock);
 863	neigh->ops->solicit(neigh, skb);
 864	atomic_inc(&neigh->probes);
 865	kfree_skb(skb);
 866}
 867
 868/* Called when a timer expires for a neighbour entry. */
 869
 870static void neigh_timer_handler(unsigned long arg)
 871{
 872	unsigned long now, next;
 873	struct neighbour *neigh = (struct neighbour *)arg;
 874	unsigned int state;
 875	int notify = 0;
 876
 877	write_lock(&neigh->lock);
 878
 879	state = neigh->nud_state;
 880	now = jiffies;
 881	next = now + HZ;
 882
 883	if (!(state & NUD_IN_TIMER))
 884		goto out;
 885
 886	if (state & NUD_REACHABLE) {
 887		if (time_before_eq(now,
 888				   neigh->confirmed + neigh->parms->reachable_time)) {
 889			neigh_dbg(2, "neigh %p is still alive\n", neigh);
 890			next = neigh->confirmed + neigh->parms->reachable_time;
 891		} else if (time_before_eq(now,
 892					  neigh->used +
 893					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 894			neigh_dbg(2, "neigh %p is delayed\n", neigh);
 895			neigh->nud_state = NUD_DELAY;
 896			neigh->updated = jiffies;
 897			neigh_suspect(neigh);
 898			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
 899		} else {
 900			neigh_dbg(2, "neigh %p is suspected\n", neigh);
 901			neigh->nud_state = NUD_STALE;
 902			neigh->updated = jiffies;
 903			neigh_suspect(neigh);
 904			notify = 1;
 905		}
 906	} else if (state & NUD_DELAY) {
 907		if (time_before_eq(now,
 908				   neigh->confirmed +
 909				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 910			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
 911			neigh->nud_state = NUD_REACHABLE;
 912			neigh->updated = jiffies;
 913			neigh_connect(neigh);
 914			notify = 1;
 915			next = neigh->confirmed + neigh->parms->reachable_time;
 916		} else {
 917			neigh_dbg(2, "neigh %p is probed\n", neigh);
 918			neigh->nud_state = NUD_PROBE;
 919			neigh->updated = jiffies;
 920			atomic_set(&neigh->probes, 0);
 921			notify = 1;
 922			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 923		}
 924	} else {
 925		/* NUD_PROBE|NUD_INCOMPLETE */
 926		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 927	}
 928
 929	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 930	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 931		neigh->nud_state = NUD_FAILED;
 932		notify = 1;
 933		neigh_invalidate(neigh);
 934		goto out;
 935	}
 936
 937	if (neigh->nud_state & NUD_IN_TIMER) {
 938		if (time_before(next, jiffies + HZ/2))
 939			next = jiffies + HZ/2;
 940		if (!mod_timer(&neigh->timer, next))
 941			neigh_hold(neigh);
 942	}
 943	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 944		neigh_probe(neigh);
 945	} else {
 946out:
 947		write_unlock(&neigh->lock);
 948	}
 949
 950	if (notify)
 951		neigh_update_notify(neigh);
 952
 953	neigh_release(neigh);
 954}
 955
 956int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 957{
 958	int rc;
 959	bool immediate_probe = false;
 960
 961	write_lock_bh(&neigh->lock);
 962
 963	rc = 0;
 964	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 965		goto out_unlock_bh;
 966	if (neigh->dead)
 967		goto out_dead;
 968
 969	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 970		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
 971		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
 972			unsigned long next, now = jiffies;
 973
 974			atomic_set(&neigh->probes,
 975				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
 976			neigh->nud_state     = NUD_INCOMPLETE;
 977			neigh->updated = now;
 978			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
 979					 HZ/2);
 980			neigh_add_timer(neigh, next);
 981			immediate_probe = true;
 982		} else {
 983			neigh->nud_state = NUD_FAILED;
 984			neigh->updated = jiffies;
 985			write_unlock_bh(&neigh->lock);
 986
 987			kfree_skb(skb);
 988			return 1;
 989		}
 990	} else if (neigh->nud_state & NUD_STALE) {
 991		neigh_dbg(2, "neigh %p is delayed\n", neigh);
 992		neigh->nud_state = NUD_DELAY;
 993		neigh->updated = jiffies;
 994		neigh_add_timer(neigh, jiffies +
 995				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
 996	}
 997
 998	if (neigh->nud_state == NUD_INCOMPLETE) {
 999		if (skb) {
1000			while (neigh->arp_queue_len_bytes + skb->truesize >
1001			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1002				struct sk_buff *buff;
1003
1004				buff = __skb_dequeue(&neigh->arp_queue);
1005				if (!buff)
1006					break;
1007				neigh->arp_queue_len_bytes -= buff->truesize;
1008				kfree_skb(buff);
1009				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1010			}
1011			skb_dst_force(skb);
1012			__skb_queue_tail(&neigh->arp_queue, skb);
1013			neigh->arp_queue_len_bytes += skb->truesize;
1014		}
1015		rc = 1;
1016	}
1017out_unlock_bh:
1018	if (immediate_probe)
1019		neigh_probe(neigh);
1020	else
1021		write_unlock(&neigh->lock);
1022	local_bh_enable();
1023	return rc;
1024
1025out_dead:
1026	if (neigh->nud_state & NUD_STALE)
1027		goto out_unlock_bh;
1028	write_unlock_bh(&neigh->lock);
1029	kfree_skb(skb);
1030	return 1;
1031}
1032EXPORT_SYMBOL(__neigh_event_send);
1033
1034static void neigh_update_hhs(struct neighbour *neigh)
1035{
1036	struct hh_cache *hh;
1037	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1038		= NULL;
1039
1040	if (neigh->dev->header_ops)
1041		update = neigh->dev->header_ops->cache_update;
1042
1043	if (update) {
1044		hh = &neigh->hh;
1045		if (hh->hh_len) {
1046			write_seqlock_bh(&hh->hh_lock);
1047			update(hh, neigh->dev, neigh->ha);
1048			write_sequnlock_bh(&hh->hh_lock);
1049		}
1050	}
1051}
1052
1053
1054
1055/* Generic update routine.
1056   -- lladdr is the new lladdr, or NULL if none is supplied.
1057   -- new    is the new state.
1058   -- flags
1059	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1060				if it is different.
1061	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1062				lladdr instead of overriding it
1063				if it is different.
1064	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1065
1066	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1067				NTF_ROUTER flag.
1068	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known as
1069				a router.
1070
1071   Caller MUST hold a reference count on the entry.
1072 */
1073
1074int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1075		 u32 flags)
1076{
1077	u8 old;
1078	int err;
1079	int notify = 0;
1080	struct net_device *dev;
1081	int update_isrouter = 0;
1082
1083	write_lock_bh(&neigh->lock);
1084
1085	dev    = neigh->dev;
1086	old    = neigh->nud_state;
1087	err    = -EPERM;
1088
1089	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1090	    (old & (NUD_NOARP | NUD_PERMANENT)))
1091		goto out;
1092	if (neigh->dead)
1093		goto out;
1094
1095	if (!(new & NUD_VALID)) {
1096		neigh_del_timer(neigh);
1097		if (old & NUD_CONNECTED)
1098			neigh_suspect(neigh);
1099		neigh->nud_state = new;
1100		err = 0;
1101		notify = old & NUD_VALID;
1102		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1103		    (new & NUD_FAILED)) {
1104			neigh_invalidate(neigh);
1105			notify = 1;
1106		}
1107		goto out;
1108	}
1109
1110	/* Compare new lladdr with cached one */
1111	if (!dev->addr_len) {
1112		/* First case: device needs no address. */
1113		lladdr = neigh->ha;
1114	} else if (lladdr) {
1115		/* The second case: if something is already cached
1116		   and a new address is proposed:
1117		   - compare new & old
1118		   - if they are different, check override flag
1119		 */
1120		if ((old & NUD_VALID) &&
1121		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1122			lladdr = neigh->ha;
1123	} else {
1124		/* No address is supplied; if we know something,
1125		   use it, otherwise discard the request.
1126		 */
1127		err = -EINVAL;
1128		if (!(old & NUD_VALID))
1129			goto out;
1130		lladdr = neigh->ha;
1131	}
1132
1133	if (new & NUD_CONNECTED)
1134		neigh->confirmed = jiffies;
1135	neigh->updated = jiffies;
1136
1137	/* If the entry was valid and the address has not changed,
1138	   do not change the entry state if the new one is STALE.
1139	 */
1140	err = 0;
1141	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1142	if (old & NUD_VALID) {
1143		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1144			update_isrouter = 0;
1145			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1146			    (old & NUD_CONNECTED)) {
1147				lladdr = neigh->ha;
1148				new = NUD_STALE;
1149			} else
1150				goto out;
1151		} else {
1152			if (lladdr == neigh->ha && new == NUD_STALE &&
1153			    !(flags & NEIGH_UPDATE_F_ADMIN))
1154				new = old;
1155		}
1156	}
1157
1158	if (new != old) {
1159		neigh_del_timer(neigh);
1160		if (new & NUD_PROBE)
1161			atomic_set(&neigh->probes, 0);
1162		if (new & NUD_IN_TIMER)
1163			neigh_add_timer(neigh, (jiffies +
1164						((new & NUD_REACHABLE) ?
1165						 neigh->parms->reachable_time :
1166						 0)));
1167		neigh->nud_state = new;
1168		notify = 1;
1169	}
1170
1171	if (lladdr != neigh->ha) {
1172		write_seqlock(&neigh->ha_lock);
1173		memcpy(&neigh->ha, lladdr, dev->addr_len);
1174		write_sequnlock(&neigh->ha_lock);
1175		neigh_update_hhs(neigh);
1176		if (!(new & NUD_CONNECTED))
1177			neigh->confirmed = jiffies -
1178				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1179		notify = 1;
1180	}
1181	if (new == old)
1182		goto out;
1183	if (new & NUD_CONNECTED)
1184		neigh_connect(neigh);
1185	else
1186		neigh_suspect(neigh);
1187	if (!(old & NUD_VALID)) {
1188		struct sk_buff *skb;
1189
1190		/* Again: avoid dead loop if something went wrong */
1191
1192		while (neigh->nud_state & NUD_VALID &&
1193		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1194			struct dst_entry *dst = skb_dst(skb);
1195			struct neighbour *n2, *n1 = neigh;
1196			write_unlock_bh(&neigh->lock);
1197
1198			rcu_read_lock();
1199
1200			/* Why not just use 'neigh' as-is?  The problem is that
1201			 * things such as shaper, eql, and sch_teql can end up
1202			 * using alternative neigh objects to output the packet
1203			 * in the output path.  So what we need to do here is
1204			 * re-lookup the top-level neigh in the path so that we
1205			 * can reinject the packet there.
1206			 */
1207			n2 = NULL;
1208			if (dst) {
1209				n2 = dst_neigh_lookup_skb(dst, skb);
1210				if (n2)
1211					n1 = n2;
1212			}
1213			n1->output(n1, skb);
1214			if (n2)
1215				neigh_release(n2);
1216			rcu_read_unlock();
1217
1218			write_lock_bh(&neigh->lock);
1219		}
1220		__skb_queue_purge(&neigh->arp_queue);
1221		neigh->arp_queue_len_bytes = 0;
1222	}
1223out:
1224	if (update_isrouter) {
1225		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1226			(neigh->flags | NTF_ROUTER) :
1227			(neigh->flags & ~NTF_ROUTER);
1228	}
1229	write_unlock_bh(&neigh->lock);
1230
1231	if (notify)
1232		neigh_update_notify(neigh);
1233
1234	return err;
1235}
1236EXPORT_SYMBOL(neigh_update);
1237
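/*
 * Minimal caller sketch (names are placeholders, not taken from this file):
 * a protocol backend that has just parsed a resolution reply typically does
 * something like
 *
 *	err = neigh_update(n, lladdr, NUD_REACHABLE,
 *			   NEIGH_UPDATE_F_OVERRIDE);
 *
 * whereas an unsolicited advertisement that only proves the address is
 * plausible would pass NUD_STALE instead, as neigh_event_ns() below does.
 */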
1238/* Update the neigh to listen temporarily for probe responses, even if it is
1239 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1240 */
1241void __neigh_set_probe_once(struct neighbour *neigh)
1242{
1243	if (neigh->dead)
1244		return;
1245	neigh->updated = jiffies;
1246	if (!(neigh->nud_state & NUD_FAILED))
1247		return;
1248	neigh->nud_state = NUD_INCOMPLETE;
1249	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1250	neigh_add_timer(neigh,
1251			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1252}
1253EXPORT_SYMBOL(__neigh_set_probe_once);
1254
1255struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1256				 u8 *lladdr, void *saddr,
1257				 struct net_device *dev)
1258{
1259	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1260						 lladdr || !dev->addr_len);
1261	if (neigh)
1262		neigh_update(neigh, lladdr, NUD_STALE,
1263			     NEIGH_UPDATE_F_OVERRIDE);
1264	return neigh;
1265}
1266EXPORT_SYMBOL(neigh_event_ns);
1267
1268/* called with read_lock_bh(&n->lock); */
1269static void neigh_hh_init(struct neighbour *n)
1270{
1271	struct net_device *dev = n->dev;
1272	__be16 prot = n->tbl->protocol;
1273	struct hh_cache	*hh = &n->hh;
1274
1275	write_lock_bh(&n->lock);
1276
1277	/* Only one thread can come in here and initialize the
1278	 * hh_cache entry.
1279	 */
1280	if (!hh->hh_len)
1281		dev->header_ops->cache(n, hh, prot);
1282
1283	write_unlock_bh(&n->lock);
1284}
1285
1286/* Slow and careful. */
1287
1288int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1289{
1290	int rc = 0;
1291
1292	if (!neigh_event_send(neigh, skb)) {
1293		int err;
1294		struct net_device *dev = neigh->dev;
1295		unsigned int seq;
1296
1297		if (dev->header_ops->cache && !neigh->hh.hh_len)
1298			neigh_hh_init(neigh);
1299
1300		do {
1301			__skb_pull(skb, skb_network_offset(skb));
1302			seq = read_seqbegin(&neigh->ha_lock);
1303			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1304					      neigh->ha, NULL, skb->len);
1305		} while (read_seqretry(&neigh->ha_lock, seq));
1306
1307		if (err >= 0)
1308			rc = dev_queue_xmit(skb);
1309		else
1310			goto out_kfree_skb;
1311	}
1312out:
1313	return rc;
1314out_kfree_skb:
1315	rc = -EINVAL;
1316	kfree_skb(skb);
1317	goto out;
1318}
1319EXPORT_SYMBOL(neigh_resolve_output);
1320
1321/* As fast as possible without hh cache */
1322
1323int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1324{
1325	struct net_device *dev = neigh->dev;
1326	unsigned int seq;
1327	int err;
1328
1329	do {
1330		__skb_pull(skb, skb_network_offset(skb));
1331		seq = read_seqbegin(&neigh->ha_lock);
1332		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1333				      neigh->ha, NULL, skb->len);
1334	} while (read_seqretry(&neigh->ha_lock, seq));
1335
1336	if (err >= 0)
1337		err = dev_queue_xmit(skb);
1338	else {
1339		err = -EINVAL;
1340		kfree_skb(skb);
1341	}
1342	return err;
1343}
1344EXPORT_SYMBOL(neigh_connected_output);
1345
1346int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1347{
1348	return dev_queue_xmit(skb);
1349}
1350EXPORT_SYMBOL(neigh_direct_output);
1351
1352static void neigh_proxy_process(unsigned long arg)
1353{
1354	struct neigh_table *tbl = (struct neigh_table *)arg;
1355	long sched_next = 0;
1356	unsigned long now = jiffies;
1357	struct sk_buff *skb, *n;
1358
1359	spin_lock(&tbl->proxy_queue.lock);
1360
1361	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1362		long tdif = NEIGH_CB(skb)->sched_next - now;
1363
1364		if (tdif <= 0) {
1365			struct net_device *dev = skb->dev;
1366
1367			__skb_unlink(skb, &tbl->proxy_queue);
1368			if (tbl->proxy_redo && netif_running(dev)) {
1369				rcu_read_lock();
1370				tbl->proxy_redo(skb);
1371				rcu_read_unlock();
1372			} else {
1373				kfree_skb(skb);
1374			}
1375
1376			dev_put(dev);
1377		} else if (!sched_next || tdif < sched_next)
1378			sched_next = tdif;
1379	}
1380	del_timer(&tbl->proxy_timer);
1381	if (sched_next)
1382		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1383	spin_unlock(&tbl->proxy_queue.lock);
1384}
1385
1386void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1387		    struct sk_buff *skb)
1388{
1389	unsigned long now = jiffies;
1390
1391	unsigned long sched_next = now + (prandom_u32() %
1392					  NEIGH_VAR(p, PROXY_DELAY));
1393
1394	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1395		kfree_skb(skb);
1396		return;
1397	}
1398
1399	NEIGH_CB(skb)->sched_next = sched_next;
1400	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1401
1402	spin_lock(&tbl->proxy_queue.lock);
1403	if (del_timer(&tbl->proxy_timer)) {
1404		if (time_before(tbl->proxy_timer.expires, sched_next))
1405			sched_next = tbl->proxy_timer.expires;
1406	}
1407	skb_dst_drop(skb);
1408	dev_hold(skb->dev);
1409	__skb_queue_tail(&tbl->proxy_queue, skb);
1410	mod_timer(&tbl->proxy_timer, sched_next);
1411	spin_unlock(&tbl->proxy_queue.lock);
1412}
1413EXPORT_SYMBOL(pneigh_enqueue);
1414
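/*
 * Worked example (assuming ARP's usual proxy_delay default of 8/10 of a
 * second): a proxied request queued here is answered after a random delay
 * in [0, 0.8 s), and proxy_timer is re-armed for whichever queued packet
 * is due first.
 */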
1415static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1416						      struct net *net, int ifindex)
1417{
1418	struct neigh_parms *p;
1419
1420	list_for_each_entry(p, &tbl->parms_list, list) {
1421		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1422		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1423			return p;
1424	}
1425
1426	return NULL;
1427}
1428
1429struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1430				      struct neigh_table *tbl)
1431{
1432	struct neigh_parms *p;
1433	struct net *net = dev_net(dev);
1434	const struct net_device_ops *ops = dev->netdev_ops;
1435
1436	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1437	if (p) {
1438		p->tbl		  = tbl;
1439		atomic_set(&p->refcnt, 1);
1440		p->reachable_time =
1441				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1442		dev_hold(dev);
1443		p->dev = dev;
1444		write_pnet(&p->net, net);
1445		p->sysctl_table = NULL;
1446
1447		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1448			dev_put(dev);
1449			kfree(p);
1450			return NULL;
1451		}
1452
1453		write_lock_bh(&tbl->lock);
1454		list_add(&p->list, &tbl->parms.list);
1455		write_unlock_bh(&tbl->lock);
1456
1457		neigh_parms_data_state_cleanall(p);
1458	}
1459	return p;
1460}
1461EXPORT_SYMBOL(neigh_parms_alloc);
1462
1463static void neigh_rcu_free_parms(struct rcu_head *head)
1464{
1465	struct neigh_parms *parms =
1466		container_of(head, struct neigh_parms, rcu_head);
1467
1468	neigh_parms_put(parms);
1469}
1470
1471void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1472{
1473	if (!parms || parms == &tbl->parms)
1474		return;
1475	write_lock_bh(&tbl->lock);
1476	list_del(&parms->list);
1477	parms->dead = 1;
1478	write_unlock_bh(&tbl->lock);
1479	if (parms->dev)
1480		dev_put(parms->dev);
1481	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1482}
1483EXPORT_SYMBOL(neigh_parms_release);
1484
1485static void neigh_parms_destroy(struct neigh_parms *parms)
1486{
1487	kfree(parms);
1488}
1489
1490static struct lock_class_key neigh_table_proxy_queue_class;
1491
1492static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1493
1494void neigh_table_init(int index, struct neigh_table *tbl)
1495{
1496	unsigned long now = jiffies;
1497	unsigned long phsize;
1498
1499	INIT_LIST_HEAD(&tbl->parms_list);
1500	list_add(&tbl->parms.list, &tbl->parms_list);
1501	write_pnet(&tbl->parms.net, &init_net);
1502	atomic_set(&tbl->parms.refcnt, 1);
1503	tbl->parms.reachable_time =
1504			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1505
1506	tbl->stats = alloc_percpu(struct neigh_statistics);
1507	if (!tbl->stats)
1508		panic("cannot create neighbour cache statistics");
1509
1510#ifdef CONFIG_PROC_FS
1511	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1512			      &neigh_stat_seq_fops, tbl))
1513		panic("cannot create neighbour proc dir entry");
1514#endif
1515
1516	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1517
1518	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1519	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1520
1521	if (!tbl->nht || !tbl->phash_buckets)
1522		panic("cannot allocate neighbour cache hashes");
1523
1524	if (!tbl->entry_size)
1525		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1526					tbl->key_len, NEIGH_PRIV_ALIGN);
1527	else
1528		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1529
1530	rwlock_init(&tbl->lock);
1531	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1532	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1533			tbl->parms.reachable_time);
1534	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1535	skb_queue_head_init_class(&tbl->proxy_queue,
1536			&neigh_table_proxy_queue_class);
1537
1538	tbl->last_flush = now;
1539	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1540
1541	neigh_tables[index] = tbl;
1542}
1543EXPORT_SYMBOL(neigh_table_init);
1544
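/*
 * Caller sketch (call sites live outside this file and are cited from
 * memory, so only indicative): the per-family front ends register their
 * tables through this helper, e.g.
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *	neigh_table_init(NEIGH_ND_TABLE, &nd_tbl);
 *
 * which is what later lets neigh_find_table() map AF_INET/AF_INET6 onto
 * the right table.
 */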
1545int neigh_table_clear(int index, struct neigh_table *tbl)
1546{
1547	neigh_tables[index] = NULL;
1548	/* This is not clean... it should be fixed so the IPv6 module can be unloaded safely */
1549	cancel_delayed_work_sync(&tbl->gc_work);
1550	del_timer_sync(&tbl->proxy_timer);
1551	pneigh_queue_purge(&tbl->proxy_queue);
1552	neigh_ifdown(tbl, NULL);
1553	if (atomic_read(&tbl->entries))
1554		pr_crit("neighbour leakage\n");
1555
1556	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1557		 neigh_hash_free_rcu);
1558	tbl->nht = NULL;
1559
1560	kfree(tbl->phash_buckets);
1561	tbl->phash_buckets = NULL;
1562
1563	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1564
1565	free_percpu(tbl->stats);
1566	tbl->stats = NULL;
1567
1568	return 0;
1569}
1570EXPORT_SYMBOL(neigh_table_clear);
1571
1572static struct neigh_table *neigh_find_table(int family)
1573{
1574	struct neigh_table *tbl = NULL;
1575
1576	switch (family) {
1577	case AF_INET:
1578		tbl = neigh_tables[NEIGH_ARP_TABLE];
1579		break;
1580	case AF_INET6:
1581		tbl = neigh_tables[NEIGH_ND_TABLE];
1582		break;
1583	case AF_DECnet:
1584		tbl = neigh_tables[NEIGH_DN_TABLE];
1585		break;
1586	}
1587
1588	return tbl;
1589}
1590
1591static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1592{
1593	struct net *net = sock_net(skb->sk);
1594	struct ndmsg *ndm;
1595	struct nlattr *dst_attr;
1596	struct neigh_table *tbl;
1597	struct neighbour *neigh;
1598	struct net_device *dev = NULL;
1599	int err = -EINVAL;
1600
1601	ASSERT_RTNL();
1602	if (nlmsg_len(nlh) < sizeof(*ndm))
1603		goto out;
1604
1605	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1606	if (dst_attr == NULL)
1607		goto out;
1608
1609	ndm = nlmsg_data(nlh);
1610	if (ndm->ndm_ifindex) {
1611		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1612		if (dev == NULL) {
1613			err = -ENODEV;
1614			goto out;
1615		}
1616	}
1617
1618	tbl = neigh_find_table(ndm->ndm_family);
1619	if (tbl == NULL)
1620		return -EAFNOSUPPORT;
1621
1622	if (nla_len(dst_attr) < tbl->key_len)
1623		goto out;
1624
1625	if (ndm->ndm_flags & NTF_PROXY) {
1626		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1627		goto out;
1628	}
1629
1630	if (dev == NULL)
1631		goto out;
1632
1633	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1634	if (neigh == NULL) {
1635		err = -ENOENT;
1636		goto out;
1637	}
1638
1639	err = neigh_update(neigh, NULL, NUD_FAILED,
1640			   NEIGH_UPDATE_F_OVERRIDE |
1641			   NEIGH_UPDATE_F_ADMIN);
1642	neigh_release(neigh);
1643
1644out:
1645	return err;
1646}
1647
1648static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1649{
1650	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1651	struct net *net = sock_net(skb->sk);
1652	struct ndmsg *ndm;
1653	struct nlattr *tb[NDA_MAX+1];
1654	struct neigh_table *tbl;
1655	struct net_device *dev = NULL;
1656	struct neighbour *neigh;
1657	void *dst, *lladdr;
1658	int err;
1659
1660	ASSERT_RTNL();
1661	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1662	if (err < 0)
1663		goto out;
1664
1665	err = -EINVAL;
1666	if (tb[NDA_DST] == NULL)
1667		goto out;
1668
1669	ndm = nlmsg_data(nlh);
1670	if (ndm->ndm_ifindex) {
1671		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1672		if (dev == NULL) {
1673			err = -ENODEV;
1674			goto out;
1675		}
1676
1677		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1678			goto out;
1679	}
1680
1681	tbl = neigh_find_table(ndm->ndm_family);
1682	if (tbl == NULL)
1683		return -EAFNOSUPPORT;
1684
1685	if (nla_len(tb[NDA_DST]) < tbl->key_len)
1686		goto out;
1687	dst = nla_data(tb[NDA_DST]);
1688	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1689
1690	if (ndm->ndm_flags & NTF_PROXY) {
1691		struct pneigh_entry *pn;
1692
1693		err = -ENOBUFS;
1694		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1695		if (pn) {
1696			pn->flags = ndm->ndm_flags;
1697			err = 0;
1698		}
1699		goto out;
1700	}
1701
1702	if (dev == NULL)
1703		goto out;
1704
1705	neigh = neigh_lookup(tbl, dst, dev);
1706	if (neigh == NULL) {
1707		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1708			err = -ENOENT;
1709			goto out;
1710		}
1711
1712		neigh = __neigh_lookup_errno(tbl, dst, dev);
1713		if (IS_ERR(neigh)) {
1714			err = PTR_ERR(neigh);
1715			goto out;
1716		}
1717	} else {
1718		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1719			err = -EEXIST;
1720			neigh_release(neigh);
1721			goto out;
1722		}
1723
1724		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1725			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1726	}
1727
1728	if (ndm->ndm_flags & NTF_USE) {
1729		neigh_event_send(neigh, NULL);
1730		err = 0;
1731	} else
1732		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1733	neigh_release(neigh);
1734
1735out:
1736	return err;
1737}
1738
1739static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1740{
1741	struct nlattr *nest;
1742
1743	nest = nla_nest_start(skb, NDTA_PARMS);
1744	if (nest == NULL)
1745		return -ENOBUFS;
1746
1747	if ((parms->dev &&
1748	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1749	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1750	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1751			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1752	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1753	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1754			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1755	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1756	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1757	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1758			NEIGH_VAR(parms, UCAST_PROBES)) ||
1759	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1760			NEIGH_VAR(parms, MCAST_PROBES)) ||
1761	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1762			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1763	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1764			  NDTPA_PAD) ||
1765	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1766			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1767	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1768			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1769	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1770			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1771	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1772			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1773	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1774			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1775	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1776			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1777	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1778			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1779		goto nla_put_failure;
1780	return nla_nest_end(skb, nest);
1781
1782nla_put_failure:
1783	nla_nest_cancel(skb, nest);
1784	return -EMSGSIZE;
1785}
1786
1787static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1788			      u32 pid, u32 seq, int type, int flags)
1789{
1790	struct nlmsghdr *nlh;
1791	struct ndtmsg *ndtmsg;
1792
1793	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1794	if (nlh == NULL)
1795		return -EMSGSIZE;
1796
1797	ndtmsg = nlmsg_data(nlh);
1798
1799	read_lock_bh(&tbl->lock);
1800	ndtmsg->ndtm_family = tbl->family;
1801	ndtmsg->ndtm_pad1   = 0;
1802	ndtmsg->ndtm_pad2   = 0;
1803
1804	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1805	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1806	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1807	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1808	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1809		goto nla_put_failure;
1810	{
1811		unsigned long now = jiffies;
1812		unsigned int flush_delta = now - tbl->last_flush;
1813		unsigned int rand_delta = now - tbl->last_rand;
1814		struct neigh_hash_table *nht;
1815		struct ndt_config ndc = {
1816			.ndtc_key_len		= tbl->key_len,
1817			.ndtc_entry_size	= tbl->entry_size,
1818			.ndtc_entries		= atomic_read(&tbl->entries),
1819			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1820			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1821			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1822		};
1823
1824		rcu_read_lock_bh();
1825		nht = rcu_dereference_bh(tbl->nht);
1826		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1827		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1828		rcu_read_unlock_bh();
1829
1830		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1831			goto nla_put_failure;
1832	}
1833
1834	{
1835		int cpu;
1836		struct ndt_stats ndst;
1837
1838		memset(&ndst, 0, sizeof(ndst));
1839
1840		for_each_possible_cpu(cpu) {
1841			struct neigh_statistics	*st;
1842
1843			st = per_cpu_ptr(tbl->stats, cpu);
1844			ndst.ndts_allocs		+= st->allocs;
1845			ndst.ndts_destroys		+= st->destroys;
1846			ndst.ndts_hash_grows		+= st->hash_grows;
1847			ndst.ndts_res_failed		+= st->res_failed;
1848			ndst.ndts_lookups		+= st->lookups;
1849			ndst.ndts_hits			+= st->hits;
1850			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1851			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1852			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1853			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1854			ndst.ndts_table_fulls		+= st->table_fulls;
1855		}
1856
1857		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1858				  NDTA_PAD))
1859			goto nla_put_failure;
1860	}
1861
1862	BUG_ON(tbl->parms.dev);
1863	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1864		goto nla_put_failure;
1865
1866	read_unlock_bh(&tbl->lock);
1867	nlmsg_end(skb, nlh);
1868	return 0;
1869
1870nla_put_failure:
1871	read_unlock_bh(&tbl->lock);
1872	nlmsg_cancel(skb, nlh);
1873	return -EMSGSIZE;
1874}
1875
1876static int neightbl_fill_param_info(struct sk_buff *skb,
1877				    struct neigh_table *tbl,
1878				    struct neigh_parms *parms,
1879				    u32 pid, u32 seq, int type,
1880				    unsigned int flags)
1881{
1882	struct ndtmsg *ndtmsg;
1883	struct nlmsghdr *nlh;
1884
1885	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1886	if (nlh == NULL)
1887		return -EMSGSIZE;
1888
1889	ndtmsg = nlmsg_data(nlh);
1890
1891	read_lock_bh(&tbl->lock);
1892	ndtmsg->ndtm_family = tbl->family;
1893	ndtmsg->ndtm_pad1   = 0;
1894	ndtmsg->ndtm_pad2   = 0;
1895
1896	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1897	    neightbl_fill_parms(skb, parms) < 0)
1898		goto errout;
1899
1900	read_unlock_bh(&tbl->lock);
1901	nlmsg_end(skb, nlh);
1902	return 0;
1903errout:
1904	read_unlock_bh(&tbl->lock);
1905	nlmsg_cancel(skb, nlh);
1906	return -EMSGSIZE;
1907}
1908
1909static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1910	[NDTA_NAME]		= { .type = NLA_STRING },
1911	[NDTA_THRESH1]		= { .type = NLA_U32 },
1912	[NDTA_THRESH2]		= { .type = NLA_U32 },
1913	[NDTA_THRESH3]		= { .type = NLA_U32 },
1914	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1915	[NDTA_PARMS]		= { .type = NLA_NESTED },
1916};
1917
1918static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1919	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1920	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1921	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1922	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1923	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1924	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1925	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1926	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1927	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1928	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1929	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1930	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1931	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1932	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1933};
1934
1935static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1936{
1937	struct net *net = sock_net(skb->sk);
1938	struct neigh_table *tbl;
1939	struct ndtmsg *ndtmsg;
1940	struct nlattr *tb[NDTA_MAX+1];
1941	bool found = false;
1942	int err, tidx;
1943
1944	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1945			  nl_neightbl_policy);
1946	if (err < 0)
1947		goto errout;
1948
1949	if (tb[NDTA_NAME] == NULL) {
1950		err = -EINVAL;
1951		goto errout;
1952	}
1953
1954	ndtmsg = nlmsg_data(nlh);
1955
1956	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1957		tbl = neigh_tables[tidx];
1958		if (!tbl)
1959			continue;
1960		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1961			continue;
1962		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1963			found = true;
1964			break;
1965		}
1966	}
1967
1968	if (!found)
1969		return -ENOENT;
1970
1971	/*
1972	 * We acquire tbl->lock to be nice to the periodic timers and
1973	 * make sure they always see a consistent set of values.
1974	 */
1975	write_lock_bh(&tbl->lock);
1976
1977	if (tb[NDTA_PARMS]) {
1978		struct nlattr *tbp[NDTPA_MAX+1];
1979		struct neigh_parms *p;
1980		int i, ifindex = 0;
1981
1982		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1983				       nl_ntbl_parm_policy);
1984		if (err < 0)
1985			goto errout_tbl_lock;
1986
1987		if (tbp[NDTPA_IFINDEX])
1988			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1989
1990		p = lookup_neigh_parms(tbl, net, ifindex);
1991		if (p == NULL) {
1992			err = -ENOENT;
1993			goto errout_tbl_lock;
1994		}
1995
1996		for (i = 1; i <= NDTPA_MAX; i++) {
1997			if (tbp[i] == NULL)
1998				continue;
1999
2000			switch (i) {
2001			case NDTPA_QUEUE_LEN:
2002				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2003					      nla_get_u32(tbp[i]) *
2004					      SKB_TRUESIZE(ETH_FRAME_LEN));
2005				break;
2006			case NDTPA_QUEUE_LENBYTES:
2007				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2008					      nla_get_u32(tbp[i]));
2009				break;
2010			case NDTPA_PROXY_QLEN:
2011				NEIGH_VAR_SET(p, PROXY_QLEN,
2012					      nla_get_u32(tbp[i]));
2013				break;
2014			case NDTPA_APP_PROBES:
2015				NEIGH_VAR_SET(p, APP_PROBES,
2016					      nla_get_u32(tbp[i]));
2017				break;
2018			case NDTPA_UCAST_PROBES:
2019				NEIGH_VAR_SET(p, UCAST_PROBES,
2020					      nla_get_u32(tbp[i]));
2021				break;
2022			case NDTPA_MCAST_PROBES:
2023				NEIGH_VAR_SET(p, MCAST_PROBES,
2024					      nla_get_u32(tbp[i]));
2025				break;
2026			case NDTPA_MCAST_REPROBES:
2027				NEIGH_VAR_SET(p, MCAST_REPROBES,
2028					      nla_get_u32(tbp[i]));
2029				break;
2030			case NDTPA_BASE_REACHABLE_TIME:
2031				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2032					      nla_get_msecs(tbp[i]));
2033				/* update reachable_time as well, otherwise, the change will
2034				 * only be effective after the next time neigh_periodic_work
2035				 * decides to recompute it (can be multiple minutes)
2036				 */
2037				p->reachable_time =
2038					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2039				break;
2040			case NDTPA_GC_STALETIME:
2041				NEIGH_VAR_SET(p, GC_STALETIME,
2042					      nla_get_msecs(tbp[i]));
2043				break;
2044			case NDTPA_DELAY_PROBE_TIME:
2045				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2046					      nla_get_msecs(tbp[i]));
2047				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2048				break;
2049			case NDTPA_RETRANS_TIME:
2050				NEIGH_VAR_SET(p, RETRANS_TIME,
2051					      nla_get_msecs(tbp[i]));
2052				break;
2053			case NDTPA_ANYCAST_DELAY:
2054				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055					      nla_get_msecs(tbp[i]));
2056				break;
2057			case NDTPA_PROXY_DELAY:
2058				NEIGH_VAR_SET(p, PROXY_DELAY,
2059					      nla_get_msecs(tbp[i]));
2060				break;
2061			case NDTPA_LOCKTIME:
2062				NEIGH_VAR_SET(p, LOCKTIME,
2063					      nla_get_msecs(tbp[i]));
2064				break;
2065			}
2066		}
2067	}
2068
2069	err = -ENOENT;
2070	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072	    !net_eq(net, &init_net))
2073		goto errout_tbl_lock;
2074
2075	if (tb[NDTA_THRESH1])
2076		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078	if (tb[NDTA_THRESH2])
2079		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081	if (tb[NDTA_THRESH3])
2082		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084	if (tb[NDTA_GC_INTERVAL])
2085		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087	err = 0;
2088
2089errout_tbl_lock:
2090	write_unlock_bh(&tbl->lock);
2091errout:
2092	return err;
2093}
2094
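/*
 * Userspace-facing note (iproute2 option names, mentioned only for
 * orientation): the attributes parsed above are what "ip ntable" sends, so
 * an invocation along the lines of
 *
 *	ip ntable change name arp_cache thresh1 256
 *
 * ends up in neightbl_set().  Only the global thresholds and gc_interval
 * are restricted to the initial network namespace, as checked just before
 * errout_tbl_lock.
 */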
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097	struct net *net = sock_net(skb->sk);
2098	int family, tidx, nidx = 0;
2099	int tbl_skip = cb->args[0];
2100	int neigh_skip = cb->args[1];
2101	struct neigh_table *tbl;
2102
2103	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106		struct neigh_parms *p;
2107
2108		tbl = neigh_tables[tidx];
2109		if (!tbl)
2110			continue;
2111
2112		if (tidx < tbl_skip || (family && tbl->family != family))
2113			continue;
2114
2115		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117				       NLM_F_MULTI) < 0)
2118			break;
2119
2120		nidx = 0;
2121		p = list_next_entry(&tbl->parms, list);
2122		list_for_each_entry_from(p, &tbl->parms_list, list) {
2123			if (!net_eq(neigh_parms_net(p), net))
2124				continue;
2125
2126			if (nidx < neigh_skip)
2127				goto next;
2128
2129			if (neightbl_fill_param_info(skb, tbl, p,
2130						     NETLINK_CB(cb->skb).portid,
2131						     cb->nlh->nlmsg_seq,
2132						     RTM_NEWNEIGHTBL,
2133						     NLM_F_MULTI) < 0)
2134				goto out;
2135		next:
2136			nidx++;
2137		}
2138
2139		neigh_skip = 0;
2140	}
2141out:
2142	cb->args[0] = tidx;
2143	cb->args[1] = nidx;
2144
2145	return skb->len;
2146}
2147
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149			   u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151	unsigned long now = jiffies;
2152	struct nda_cacheinfo ci;
2153	struct nlmsghdr *nlh;
2154	struct ndmsg *ndm;
2155
2156	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157	if (nlh == NULL)
2158		return -EMSGSIZE;
2159
2160	ndm = nlmsg_data(nlh);
2161	ndm->ndm_family	 = neigh->ops->family;
2162	ndm->ndm_pad1    = 0;
2163	ndm->ndm_pad2    = 0;
2164	ndm->ndm_flags	 = neigh->flags;
2165	ndm->ndm_type	 = neigh->type;
2166	ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169		goto nla_put_failure;
2170
2171	read_lock_bh(&neigh->lock);
2172	ndm->ndm_state	 = neigh->nud_state;
2173	if (neigh->nud_state & NUD_VALID) {
2174		char haddr[MAX_ADDR_LEN];
2175
2176		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178			read_unlock_bh(&neigh->lock);
2179			goto nla_put_failure;
2180		}
2181	}
2182
2183	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2184	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2186	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2187	read_unlock_bh(&neigh->lock);
2188
2189	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191		goto nla_put_failure;
2192
2193	nlmsg_end(skb, nlh);
2194	return 0;
2195
2196nla_put_failure:
2197	nlmsg_cancel(skb, nlh);
2198	return -EMSGSIZE;
2199}
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202			    u32 pid, u32 seq, int type, unsigned int flags,
2203			    struct neigh_table *tbl)
2204{
2205	struct nlmsghdr *nlh;
2206	struct ndmsg *ndm;
2207
2208	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209	if (nlh == NULL)
2210		return -EMSGSIZE;
2211
2212	ndm = nlmsg_data(nlh);
2213	ndm->ndm_family	 = tbl->family;
2214	ndm->ndm_pad1    = 0;
2215	ndm->ndm_pad2    = 0;
2216	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2217	ndm->ndm_type	 = RTN_UNICAST;
2218	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219	ndm->ndm_state	 = NUD_NONE;
2220
2221	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222		goto nla_put_failure;
2223
2224	nlmsg_end(skb, nlh);
2225	return 0;
2226
2227nla_put_failure:
2228	nlmsg_cancel(skb, nlh);
2229	return -EMSGSIZE;
2230}
2231
2232static void neigh_update_notify(struct neighbour *neigh)
2233{
2234	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2235	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2236}
2237
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240	struct net_device *master;
2241
2242	if (!master_idx)
2243		return false;
2244
2245	master = netdev_master_upper_dev_get(dev);
2246	if (!master || master->ifindex != master_idx)
2247		return true;
2248
2249	return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254	if (filter_idx && dev->ifindex != filter_idx)
2255		return true;
2256
2257	return false;
2258}
2259
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261			    struct netlink_callback *cb)
2262{
2263	struct net *net = sock_net(skb->sk);
2264	const struct nlmsghdr *nlh = cb->nlh;
2265	struct nlattr *tb[NDA_MAX + 1];
2266	struct neighbour *n;
2267	int rc, h, s_h = cb->args[1];
2268	int idx, s_idx = idx = cb->args[2];
2269	struct neigh_hash_table *nht;
2270	int filter_master_idx = 0, filter_idx = 0;
2271	unsigned int flags = NLM_F_MULTI;
2272	int err;
2273
2274	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275	if (!err) {
2276		if (tb[NDA_IFINDEX])
2277			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279		if (tb[NDA_MASTER])
2280			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282		if (filter_idx || filter_master_idx)
2283			flags |= NLM_F_DUMP_FILTERED;
2284	}
2285
2286	rcu_read_lock_bh();
2287	nht = rcu_dereference_bh(tbl->nht);
2288
2289	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290		if (h > s_h)
2291			s_idx = 0;
2292		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293		     n != NULL;
2294		     n = rcu_dereference_bh(n->next)) {
2295			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2296				goto next;
2297			if (neigh_ifindex_filtered(n->dev, filter_idx) ||
2298			    neigh_master_filtered(n->dev, filter_master_idx))
2299				goto next;
2300			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2301					    cb->nlh->nlmsg_seq,
2302					    RTM_NEWNEIGH,
2303					    flags) < 0) {
2304				rc = -1;
2305				goto out;
2306			}
2307next:
2308			idx++;
2309		}
2310	}
2311	rc = skb->len;
2312out:
2313	rcu_read_unlock_bh();
2314	cb->args[1] = h;
2315	cb->args[2] = idx;
2316	return rc;
2317}
2318
2319static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2320			     struct netlink_callback *cb)
2321{
2322	struct pneigh_entry *n;
2323	struct net *net = sock_net(skb->sk);
2324	int rc, h, s_h = cb->args[3];
2325	int idx, s_idx = idx = cb->args[4];
2326
2327	read_lock_bh(&tbl->lock);
2328
2329	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2330		if (h > s_h)
2331			s_idx = 0;
2332		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2333			if (idx < s_idx || pneigh_net(n) != net)
2334				goto next;
2335			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2336					    cb->nlh->nlmsg_seq,
2337					    RTM_NEWNEIGH,
2338					    NLM_F_MULTI, tbl) < 0) {
2339				read_unlock_bh(&tbl->lock);
2340				rc = -1;
2341				goto out;
2342			}
2343		next:
2344			idx++;
2345		}
2346	}
2347
2348	read_unlock_bh(&tbl->lock);
2349	rc = skb->len;
2350out:
2351	cb->args[3] = h;
2352	cb->args[4] = idx;
2353	return rc;
2354
2355}
2356
2357static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2358{
2359	struct neigh_table *tbl;
2360	int t, family, s_t;
2361	int proxy = 0;
2362	int err;
2363
2364	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2365
2366	/* check that a full ndmsg structure is present; the family
2367	 * member is at the same offset in both structures
2368	 */
2369	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2370	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2371		proxy = 1;
2372
2373	s_t = cb->args[0];
2374
2375	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2376		tbl = neigh_tables[t];
2377
2378		if (!tbl)
2379			continue;
2380		if (t < s_t || (family && tbl->family != family))
2381			continue;
2382		if (t > s_t)
2383			memset(&cb->args[1], 0, sizeof(cb->args) -
2384						sizeof(cb->args[0]));
2385		if (proxy)
2386			err = pneigh_dump_table(tbl, skb, cb);
2387		else
2388			err = neigh_dump_table(tbl, skb, cb);
2389		if (err < 0)
2390			break;
2391	}
2392
2393	cb->args[0] = t;
2394	return skb->len;
2395}
2396
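/*
 * Userspace-facing note (iproute2 command names, for orientation only):
 * this dump path backs "ip neigh show"; because of the NTF_PROXY check
 * above, "ip neigh show proxy" walks pneigh_dump_table() instead of the
 * ordinary neighbour hash, and the NDA_IFINDEX/NDA_MASTER filters handled
 * in neigh_dump_table() correspond to the "dev" and "master" keywords.
 */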
2397void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2398{
2399	int chain;
2400	struct neigh_hash_table *nht;
2401
2402	rcu_read_lock_bh();
2403	nht = rcu_dereference_bh(tbl->nht);
2404
2405	read_lock(&tbl->lock); /* avoid resizes */
2406	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2407		struct neighbour *n;
2408
2409		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2410		     n != NULL;
2411		     n = rcu_dereference_bh(n->next))
2412			cb(n, cookie);
2413	}
2414	read_unlock(&tbl->lock);
2415	rcu_read_unlock_bh();
2416}
2417EXPORT_SYMBOL(neigh_for_each);
2418
2419/* The tbl->lock must be held as a writer and BH disabled. */
2420void __neigh_for_each_release(struct neigh_table *tbl,
2421			      int (*cb)(struct neighbour *))
2422{
2423	int chain;
2424	struct neigh_hash_table *nht;
2425
2426	nht = rcu_dereference_protected(tbl->nht,
2427					lockdep_is_held(&tbl->lock));
2428	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2429		struct neighbour *n;
2430		struct neighbour __rcu **np;
2431
2432		np = &nht->hash_buckets[chain];
2433		while ((n = rcu_dereference_protected(*np,
2434					lockdep_is_held(&tbl->lock))) != NULL) {
2435			int release;
2436
2437			write_lock(&n->lock);
2438			release = cb(n);
2439			if (release) {
2440				rcu_assign_pointer(*np,
2441					rcu_dereference_protected(n->next,
2442						lockdep_is_held(&tbl->lock)));
2443				n->dead = 1;
2444			} else
2445				np = &n->next;
2446			write_unlock(&n->lock);
2447			if (release)
2448				neigh_cleanup_and_release(n);
2449		}
2450	}
2451}
2452EXPORT_SYMBOL(__neigh_for_each_release);
2453
2454int neigh_xmit(int index, struct net_device *dev,
2455	       const void *addr, struct sk_buff *skb)
2456{
2457	int err = -EAFNOSUPPORT;
2458	if (likely(index < NEIGH_NR_TABLES)) {
2459		struct neigh_table *tbl;
2460		struct neighbour *neigh;
2461
2462		tbl = neigh_tables[index];
2463		if (!tbl)
2464			goto out;
2465		rcu_read_lock_bh();
2466		neigh = __neigh_lookup_noref(tbl, addr, dev);
2467		if (!neigh)
2468			neigh = __neigh_create(tbl, addr, dev, false);
2469		err = PTR_ERR(neigh);
2470		if (IS_ERR(neigh)) {
2471			rcu_read_unlock_bh();
2472			goto out_kfree_skb;
2473		}
2474		err = neigh->output(neigh, skb);
2475		rcu_read_unlock_bh();
2476	}
2477	else if (index == NEIGH_LINK_TABLE) {
2478		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2479				      addr, NULL, skb->len);
2480		if (err < 0)
2481			goto out_kfree_skb;
2482		err = dev_queue_xmit(skb);
2483	}
2484out:
2485	return err;
2486out_kfree_skb:
2487	kfree_skb(skb);
2488	goto out;
2489}
2490EXPORT_SYMBOL(neigh_xmit);
2491
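/*
 * Minimal caller sketch (assumed names, not taken from this file): a
 * forwarder that already knows the IPv4 next hop can hand a frame off as
 *
 *	__be32 nh = next_hop;
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &nh, skb);
 *
 * With NEIGH_LINK_TABLE the address is used directly as the link-layer
 * destination and no neighbour entry is created or consulted.
 */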
2492#ifdef CONFIG_PROC_FS
2493
2494static struct neighbour *neigh_get_first(struct seq_file *seq)
2495{
2496	struct neigh_seq_state *state = seq->private;
2497	struct net *net = seq_file_net(seq);
2498	struct neigh_hash_table *nht = state->nht;
2499	struct neighbour *n = NULL;
2500	int bucket = state->bucket;
2501
2502	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2503	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2504		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2505
2506		while (n) {
2507			if (!net_eq(dev_net(n->dev), net))
2508				goto next;
2509			if (state->neigh_sub_iter) {
2510				loff_t fakep = 0;
2511				void *v;
2512
2513				v = state->neigh_sub_iter(state, n, &fakep);
2514				if (!v)
2515					goto next;
2516			}
2517			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2518				break;
2519			if (n->nud_state & ~NUD_NOARP)
2520				break;
2521next:
2522			n = rcu_dereference_bh(n->next);
2523		}
2524
2525		if (n)
2526			break;
2527	}
2528	state->bucket = bucket;
2529
2530	return n;
2531}
2532
2533static struct neighbour *neigh_get_next(struct seq_file *seq,
2534					struct neighbour *n,
2535					loff_t *pos)
2536{
2537	struct neigh_seq_state *state = seq->private;
2538	struct net *net = seq_file_net(seq);
2539	struct neigh_hash_table *nht = state->nht;
2540
2541	if (state->neigh_sub_iter) {
2542		void *v = state->neigh_sub_iter(state, n, pos);
2543		if (v)
2544			return n;
2545	}
2546	n = rcu_dereference_bh(n->next);
2547
2548	while (1) {
2549		while (n) {
2550			if (!net_eq(dev_net(n->dev), net))
2551				goto next;
2552			if (state->neigh_sub_iter) {
2553				void *v = state->neigh_sub_iter(state, n, pos);
2554				if (v)
2555					return n;
2556				goto next;
2557			}
2558			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2559				break;
2560
2561			if (n->nud_state & ~NUD_NOARP)
2562				break;
2563next:
2564			n = rcu_dereference_bh(n->next);
2565		}
2566
2567		if (n)
2568			break;
2569
2570		if (++state->bucket >= (1 << nht->hash_shift))
2571			break;
2572
2573		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2574	}
2575
2576	if (n && pos)
2577		--(*pos);
2578	return n;
2579}
2580
2581static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2582{
2583	struct neighbour *n = neigh_get_first(seq);
2584
2585	if (n) {
2586		--(*pos);
2587		while (*pos) {
2588			n = neigh_get_next(seq, n, pos);
2589			if (!n)
2590				break;
2591		}
2592	}
2593	return *pos ? NULL : n;
2594}
2595
2596static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2597{
2598	struct neigh_seq_state *state = seq->private;
2599	struct net *net = seq_file_net(seq);
2600	struct neigh_table *tbl = state->tbl;
2601	struct pneigh_entry *pn = NULL;
2602	int bucket = state->bucket;
2603
2604	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2605	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2606		pn = tbl->phash_buckets[bucket];
2607		while (pn && !net_eq(pneigh_net(pn), net))
2608			pn = pn->next;
2609		if (pn)
2610			break;
2611	}
2612	state->bucket = bucket;
2613
2614	return pn;
2615}
2616
2617static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2618					    struct pneigh_entry *pn,
2619					    loff_t *pos)
2620{
2621	struct neigh_seq_state *state = seq->private;
2622	struct net *net = seq_file_net(seq);
2623	struct neigh_table *tbl = state->tbl;
2624
2625	do {
2626		pn = pn->next;
2627	} while (pn && !net_eq(pneigh_net(pn), net));
2628
2629	while (!pn) {
2630		if (++state->bucket > PNEIGH_HASHMASK)
2631			break;
2632		pn = tbl->phash_buckets[state->bucket];
2633		while (pn && !net_eq(pneigh_net(pn), net))
2634			pn = pn->next;
2635		if (pn)
2636			break;
2637	}
2638
2639	if (pn && pos)
2640		--(*pos);
2641
2642	return pn;
2643}
2644
2645static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2646{
2647	struct pneigh_entry *pn = pneigh_get_first(seq);
2648
2649	if (pn) {
2650		--(*pos);
2651		while (*pos) {
2652			pn = pneigh_get_next(seq, pn, pos);
2653			if (!pn)
2654				break;
2655		}
2656	}
2657	return *pos ? NULL : pn;
2658}
2659
2660static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2661{
2662	struct neigh_seq_state *state = seq->private;
2663	void *rc;
2664	loff_t idxpos = *pos;
2665
2666	rc = neigh_get_idx(seq, &idxpos);
2667	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2668		rc = pneigh_get_idx(seq, &idxpos);
2669
2670	return rc;
2671}
2672
2673void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2674	__acquires(rcu_bh)
2675{
2676	struct neigh_seq_state *state = seq->private;
2677
2678	state->tbl = tbl;
2679	state->bucket = 0;
2680	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2681
2682	rcu_read_lock_bh();
2683	state->nht = rcu_dereference_bh(tbl->nht);
2684
2685	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2686}
2687EXPORT_SYMBOL(neigh_seq_start);
2688
2689void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2690{
2691	struct neigh_seq_state *state;
2692	void *rc;
2693
2694	if (v == SEQ_START_TOKEN) {
2695		rc = neigh_get_first(seq);
2696		goto out;
2697	}
2698
2699	state = seq->private;
2700	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2701		rc = neigh_get_next(seq, v, NULL);
2702		if (rc)
2703			goto out;
2704		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2705			rc = pneigh_get_first(seq);
2706	} else {
2707		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2708		rc = pneigh_get_next(seq, v, NULL);
2709	}
2710out:
2711	++(*pos);
2712	return rc;
2713}
2714EXPORT_SYMBOL(neigh_seq_next);
2715
2716void neigh_seq_stop(struct seq_file *seq, void *v)
2717	__releases(rcu_bh)
2718{
2719	rcu_read_unlock_bh();
2720}
2721EXPORT_SYMBOL(neigh_seq_stop);
2722
2723/* statistics via seq_file */
2724
2725static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2726{
2727	struct neigh_table *tbl = seq->private;
2728	int cpu;
2729
2730	if (*pos == 0)
2731		return SEQ_START_TOKEN;
2732
2733	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2734		if (!cpu_possible(cpu))
2735			continue;
2736		*pos = cpu+1;
2737		return per_cpu_ptr(tbl->stats, cpu);
2738	}
2739	return NULL;
2740}
2741
2742static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2743{
2744	struct neigh_table *tbl = seq->private;
2745	int cpu;
2746
2747	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2748		if (!cpu_possible(cpu))
2749			continue;
2750		*pos = cpu+1;
2751		return per_cpu_ptr(tbl->stats, cpu);
2752	}
2753	return NULL;
2754}
2755
2756static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2757{
2758
2759}
2760
2761static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2762{
2763	struct neigh_table *tbl = seq->private;
2764	struct neigh_statistics *st = v;
2765
2766	if (v == SEQ_START_TOKEN) {
2767		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2768		return 0;
2769	}
2770
2771	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2772			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2773		   atomic_read(&tbl->entries),
2774
2775		   st->allocs,
2776		   st->destroys,
2777		   st->hash_grows,
2778
2779		   st->lookups,
2780		   st->hits,
2781
2782		   st->res_failed,
2783
2784		   st->rcv_probes_mcast,
2785		   st->rcv_probes_ucast,
2786
2787		   st->periodic_gc_runs,
2788		   st->forced_gc_runs,
2789		   st->unres_discards,
2790		   st->table_fulls
2791		   );
2792
2793	return 0;
2794}
2795
2796static const struct seq_operations neigh_stat_seq_ops = {
2797	.start	= neigh_stat_seq_start,
2798	.next	= neigh_stat_seq_next,
2799	.stop	= neigh_stat_seq_stop,
2800	.show	= neigh_stat_seq_show,
2801};
2802
2803static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2804{
2805	int ret = seq_open(file, &neigh_stat_seq_ops);
2806
2807	if (!ret) {
2808		struct seq_file *sf = file->private_data;
2809		sf->private = PDE_DATA(inode);
2810	}
2811	return ret;
2812}
2813
2814static const struct file_operations neigh_stat_seq_fops = {
2815	.owner	 = THIS_MODULE,
2816	.open 	 = neigh_stat_seq_open,
2817	.read	 = seq_read,
2818	.llseek	 = seq_lseek,
2819	.release = seq_release,
2820};
2821
2822#endif /* CONFIG_PROC_FS */
2823
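/* Upper bound on the netlink message size needed for one neighbour
 * entry: the ndmsg header plus the NDA_DST, NDA_LLADDR, NDA_CACHEINFO
 * and NDA_PROBES attributes.
 */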
2824static inline size_t neigh_nlmsg_size(void)
2825{
2826	return NLMSG_ALIGN(sizeof(struct ndmsg))
2827	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2828	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2829	       + nla_total_size(sizeof(struct nda_cacheinfo))
2830	       + nla_total_size(4); /* NDA_PROBES */
2831}
2832
2833static void __neigh_notify(struct neighbour *n, int type, int flags)
2834{
2835	struct net *net = dev_net(n->dev);
2836	struct sk_buff *skb;
2837	int err = -ENOBUFS;
2838
2839	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2840	if (skb == NULL)
2841		goto errout;
2842
2843	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2844	if (err < 0) {
2845		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2846		WARN_ON(err == -EMSGSIZE);
2847		kfree_skb(skb);
2848		goto errout;
2849	}
2850	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2851	return;
2852errout:
2853	if (err < 0)
2854		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2855}
2856
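/* Notify userspace listeners on RTNLGRP_NEIGH with an RTM_GETNEIGH
 * request so an application can perform the resolution (the
 * app_solicit mechanism).
 */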
2857void neigh_app_ns(struct neighbour *n)
2858{
2859	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2860}
2861EXPORT_SYMBOL(neigh_app_ns);
2862
2863#ifdef CONFIG_SYSCTL
2864static int zero;
2865static int int_max = INT_MAX;
2866static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2867
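/* The legacy "unres_qlen" sysctl is expressed in packets, while the
 * value is stored internally in bytes (QUEUE_LEN_BYTES).  Convert with
 * SKB_TRUESIZE(ETH_FRAME_LEN) in both directions around the generic
 * proc_dointvec_minmax() handler.
 */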
2868static int proc_unres_qlen(struct ctl_table *ctl, int write,
2869			   void __user *buffer, size_t *lenp, loff_t *ppos)
2870{
2871	int size, ret;
2872	struct ctl_table tmp = *ctl;
2873
2874	tmp.extra1 = &zero;
2875	tmp.extra2 = &unres_qlen_max;
2876	tmp.data = &size;
2877
2878	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2879	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2880
2881	if (write && !ret)
2882		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2883	return ret;
2884}
2885
2886static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2887						   int family)
2888{
2889	switch (family) {
2890	case AF_INET:
2891		return __in_dev_arp_parms_get_rcu(dev);
2892	case AF_INET6:
2893		return __in6_dev_nd_parms_get_rcu(dev);
2894	}
2895	return NULL;
2896}
2897
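/* Propagate a write to the "default" parms to every device in the
 * namespace whose per-device parms have not explicitly overridden the
 * same field (tracked via the data_state bitmap).
 */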
2898static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2899				  int index)
2900{
2901	struct net_device *dev;
2902	int family = neigh_parms_family(p);
2903
2904	rcu_read_lock();
2905	for_each_netdev_rcu(net, dev) {
2906		struct neigh_parms *dst_p =
2907				neigh_get_dev_parms_rcu(dev, family);
2908
2909		if (dst_p && !test_bit(index, dst_p->data_state))
2910			dst_p->data[index] = p->data[index];
2911	}
2912	rcu_read_unlock();
2913}
2914
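/* Common post-write hook for the sysctl handlers below: mark the field
 * as explicitly set, fire a netevent when delay_first_probe_time
 * changes, and copy writes to the default parms to all devices.
 */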
2915static void neigh_proc_update(struct ctl_table *ctl, int write)
2916{
2917	struct net_device *dev = ctl->extra1;
2918	struct neigh_parms *p = ctl->extra2;
2919	struct net *net = neigh_parms_net(p);
2920	int index = (int *) ctl->data - p->data;
2921
2922	if (!write)
2923		return;
2924
2925	set_bit(index, p->data_state);
2926	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2927		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2928	if (!dev) /* a NULL dev means these are the default parms */
2929		neigh_copy_dflt_parms(net, p, index);
2930}
2931
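/* Thin wrappers around the generic proc_dointvec*() helpers that also
 * run neigh_proc_update() after a write.
 */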
2932static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2933					   void __user *buffer,
2934					   size_t *lenp, loff_t *ppos)
2935{
2936	struct ctl_table tmp = *ctl;
2937	int ret;
2938
2939	tmp.extra1 = &zero;
2940	tmp.extra2 = &int_max;
2941
2942	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2943	neigh_proc_update(ctl, write);
2944	return ret;
2945}
2946
2947int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2948			void __user *buffer, size_t *lenp, loff_t *ppos)
2949{
2950	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2951
2952	neigh_proc_update(ctl, write);
2953	return ret;
2954}
2955EXPORT_SYMBOL(neigh_proc_dointvec);
2956
2957int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2958				void __user *buffer,
2959				size_t *lenp, loff_t *ppos)
2960{
2961	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2962
2963	neigh_proc_update(ctl, write);
2964	return ret;
2965}
2966EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2967
2968static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2969					      void __user *buffer,
2970					      size_t *lenp, loff_t *ppos)
2971{
2972	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2973
2974	neigh_proc_update(ctl, write);
2975	return ret;
2976}
2977
2978int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2979				   void __user *buffer,
2980				   size_t *lenp, loff_t *ppos)
2981{
2982	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2983
2984	neigh_proc_update(ctl, write);
2985	return ret;
2986}
2987EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2988
2989static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2990					  void __user *buffer,
2991					  size_t *lenp, loff_t *ppos)
2992{
2993	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2994
2995	neigh_proc_update(ctl, write);
2996	return ret;
2997}
2998
2999static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3000					  void __user *buffer,
3001					  size_t *lenp, loff_t *ppos)
3002{
3003	struct neigh_parms *p = ctl->extra2;
3004	int ret;
3005
3006	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3007		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3008	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3009		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3010	else
3011		ret = -1;
3012
3013	if (write && ret == 0) {
3014		/* Update reachable_time as well; otherwise the change only
3015		 * takes effect the next time neigh_periodic_work decides to
3016		 * recompute it.
3017		 */
3018		p->reachable_time =
3019			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3020	}
3021	return ret;
3022}
3023
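/* The sysctl template below stores, for each entry, the offset of
 * data[index] within a neigh_parms cast from NULL; neigh_sysctl_register()
 * rebases it by adding the address of the real neigh_parms instance.
 */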
3024#define NEIGH_PARMS_DATA_OFFSET(index)	\
3025	(&((struct neigh_parms *) 0)->data[index])
3026
3027#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3028	[NEIGH_VAR_ ## attr] = { \
3029		.procname	= name, \
3030		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3031		.maxlen		= sizeof(int), \
3032		.mode		= mval, \
3033		.proc_handler	= proc, \
3034	}
3035
3036#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3037	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3038
3039#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3040	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3041
3042#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3043	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3044
3045#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3046	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3047
3048#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3049	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3050
3051#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3052	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3053
3054static struct neigh_sysctl_table {
3055	struct ctl_table_header *sysctl_header;
3056	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3057} neigh_sysctl_template __read_mostly = {
3058	.neigh_vars = {
3059		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3060		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3061		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3062		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3063		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3064		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3065		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3066		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3067		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3068		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3069		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3070		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3071		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3072		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3073		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3074		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3075		[NEIGH_VAR_GC_INTERVAL] = {
3076			.procname	= "gc_interval",
3077			.maxlen		= sizeof(int),
3078			.mode		= 0644,
3079			.proc_handler	= proc_dointvec_jiffies,
3080		},
3081		[NEIGH_VAR_GC_THRESH1] = {
3082			.procname	= "gc_thresh1",
3083			.maxlen		= sizeof(int),
3084			.mode		= 0644,
3085			.extra1 	= &zero,
3086			.extra2		= &int_max,
3087			.proc_handler	= proc_dointvec_minmax,
3088		},
3089		[NEIGH_VAR_GC_THRESH2] = {
3090			.procname	= "gc_thresh2",
3091			.maxlen		= sizeof(int),
3092			.mode		= 0644,
3093			.extra1 	= &zero,
3094			.extra2		= &int_max,
3095			.proc_handler	= proc_dointvec_minmax,
3096		},
3097		[NEIGH_VAR_GC_THRESH3] = {
3098			.procname	= "gc_thresh3",
3099			.maxlen		= sizeof(int),
3100			.mode		= 0644,
3101			.extra1 	= &zero,
3102			.extra2		= &int_max,
3103			.proc_handler	= proc_dointvec_minmax,
3104		},
3105		{},
3106	},
3107};
3108
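/* Duplicate the template for one neigh_parms instance and register it
 * under "net/<ipv4|ipv6>/neigh/<ifname|default>", e.g.
 * /proc/sys/net/ipv4/neigh/default/gc_thresh1.  Per-device tables are
 * terminated before the gc_* entries; only the "default" table exposes
 * the table-wide gc_interval/gc_thresh* values.
 */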
3109int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3110			  proc_handler *handler)
3111{
3112	int i;
3113	struct neigh_sysctl_table *t;
3114	const char *dev_name_source;
3115	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3116	char *p_name;
3117
3118	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3119	if (!t)
3120		goto err;
3121
3122	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3123		t->neigh_vars[i].data += (long) p;
3124		t->neigh_vars[i].extra1 = dev;
3125		t->neigh_vars[i].extra2 = p;
3126	}
3127
3128	if (dev) {
3129		dev_name_source = dev->name;
3130		/* Terminate the table early */
3131		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3132		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3133	} else {
3134		struct neigh_table *tbl = p->tbl;
3135		dev_name_source = "default";
3136		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3137		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3138		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3139		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3140	}
3141
3142	if (handler) {
3143		/* RetransTime */
3144		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3145		/* ReachableTime */
3146		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3147		/* RetransTime (in milliseconds) */
3148		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3149		/* ReachableTime (in milliseconds) */
3150		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3151	} else {
3152		/* These handlers update p->reachable_time after
3153		 * base_reachable_time(_ms) is written, so the new timer takes
3154		 * effect on the next neighbour update instead of waiting for
3155		 * neigh_periodic_work to recompute it (which can take minutes).
3156		 * Any handler that replaces them should do the same.
3157		 */
3158		/* ReachableTime */
3159		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3160			neigh_proc_base_reachable_time;
3161		/* ReachableTime (in milliseconds) */
3162		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3163			neigh_proc_base_reachable_time;
3164	}
3165
3166	/* Don't export sysctls to unprivileged users */
3167	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3168		t->neigh_vars[0].procname = NULL;
3169
3170	switch (neigh_parms_family(p)) {
3171	case AF_INET:
3172		p_name = "ipv4";
3173		break;
3174	case AF_INET6:
3175		p_name = "ipv6";
3176		break;
3177	default:
3178		BUG();
3179	}
3180
3181	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3182		p_name, dev_name_source);
3183	t->sysctl_header =
3184		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3185	if (!t->sysctl_header)
3186		goto free;
3187
3188	p->sysctl_table = t;
3189	return 0;
3190
3191free:
3192	kfree(t);
3193err:
3194	return -ENOBUFS;
3195}
3196EXPORT_SYMBOL(neigh_sysctl_register);
3197
3198void neigh_sysctl_unregister(struct neigh_parms *p)
3199{
3200	if (p->sysctl_table) {
3201		struct neigh_sysctl_table *t = p->sysctl_table;
3202		p->sysctl_table = NULL;
3203		unregister_net_sysctl_table(t->sysctl_header);
3204		kfree(t);
3205	}
3206}
3207EXPORT_SYMBOL(neigh_sysctl_unregister);
3208
3209#endif	/* CONFIG_SYSCTL */
3210
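/* Register the rtnetlink handlers for neighbour and neighbour-table
 * messages; runs once at boot as a subsys_initcall.
 */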
3211static int __init neigh_init(void)
3212{
3213	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3214	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3215	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3216
3217	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3218		      NULL);
3219	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3220
3221	return 0;
3222}
3223
3224subsys_initcall(neigh_init);
3225