   1/*
   2 *	Generic address resolution entity
   3 *
   4 *	Authors:
   5 *	Pedro Roque		<roque@di.fc.ul.pt>
   6 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 *
  13 *	Fixes:
  14 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  15 *	Harald Welte		Add neighbour cache statistics like rtstat
  16 */
  17
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/module.h>
  22#include <linux/socket.h>
  23#include <linux/netdevice.h>
  24#include <linux/proc_fs.h>
  25#ifdef CONFIG_SYSCTL
  26#include <linux/sysctl.h>
  27#endif
  28#include <linux/times.h>
  29#include <net/net_namespace.h>
  30#include <net/neighbour.h>
  31#include <net/dst.h>
  32#include <net/sock.h>
  33#include <net/netevent.h>
  34#include <net/netlink.h>
  35#include <linux/rtnetlink.h>
  36#include <linux/random.h>
  37#include <linux/string.h>
  38#include <linux/log2.h>
  39
 
  40#define NEIGH_DEBUG 1
  41
  42#define NEIGH_PRINTK(x...) printk(x)
  43#define NEIGH_NOPRINTK(x...) do { ; } while(0)
  44#define NEIGH_PRINTK1 NEIGH_NOPRINTK
  45#define NEIGH_PRINTK2 NEIGH_NOPRINTK
  46
  47#if NEIGH_DEBUG >= 1
  48#undef NEIGH_PRINTK1
  49#define NEIGH_PRINTK1 NEIGH_PRINTK
  50#endif
  51#if NEIGH_DEBUG >= 2
  52#undef NEIGH_PRINTK2
  53#define NEIGH_PRINTK2 NEIGH_PRINTK
  54#endif
  55
  56#define PNEIGH_HASHMASK		0xF
  57
  58static void neigh_timer_handler(unsigned long arg);
  59static void __neigh_notify(struct neighbour *n, int type, int flags);
  60static void neigh_update_notify(struct neighbour *neigh);
  61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
  62
  63static struct neigh_table *neigh_tables;
  64#ifdef CONFIG_PROC_FS
  65static const struct file_operations neigh_stat_seq_fops;
  66#endif
  67
   68/*
   69   Neighbour hash table buckets are protected with rwlock tbl->lock.
   70
   71   - All scans/updates of the hash buckets MUST be made under this lock.
   72   - NOTHING clever should be done under this lock: no callbacks
   73     into protocol backends, no attempts to send anything to the network.
   74     Doing so will deadlock if the backend/driver wants to use the
   75     neighbour cache.
   76   - If an entry requires some non-trivial action, increase its
   77     reference count and release the table lock.
   78
   79   Neighbour entries are protected:
   80   - by their reference count.
   81   - by the rwlock neigh->lock.
   82
   83   The reference count prevents destruction.
   84
   85   neigh->lock mainly serializes the ll address data and its validity state.
   86   However, the same lock is also used to protect other entry fields:
   87    - the timer
   88    - the resolution queue
   89
   90   Again, nothing clever should be done under neigh->lock;
   91   the most complicated operation we allow is dev->hard_header.
   92   It is assumed that dev->hard_header is simple and does
   93   not call back into the neighbour tables.
   94
   95   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
   96   list of neighbour tables; this list is used only in process context.
   97 */
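/*
   A minimal sketch of the pattern implied by the rules above:

	write_lock_bh(&tbl->lock);
	... locate the entry n in its hash bucket ...
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	... now it is safe to call into drivers, transmit, etc. ...
	neigh_release(n);
 */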
  98
  99static DEFINE_RWLOCK(neigh_tbl_lock);
 100
 101static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
 102{
 103	kfree_skb(skb);
 104	return -ENETDOWN;
 105}
 106
 107static void neigh_cleanup_and_release(struct neighbour *neigh)
 108{
 109	if (neigh->parms->neigh_cleanup)
 110		neigh->parms->neigh_cleanup(neigh);
 111
 112	__neigh_notify(neigh, RTM_DELNEIGH, 0);
 
 113	neigh_release(neigh);
 114}
 115
  116/*
  117 * Returns a value randomly distributed in the interval (1/2)*base...(3/2)*base.
  118 * This corresponds to the default IPv6 settings and is not overridable,
  119 * because it is a really reasonable choice.
  120 */
 121
 122unsigned long neigh_rand_reach_time(unsigned long base)
 123{
 124	return base ? (net_random() % base) + (base >> 1) : 0;
 125}
 126EXPORT_SYMBOL(neigh_rand_reach_time);
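/*
 * Example: with base = 30 * HZ the result is uniformly distributed in
 * [15 * HZ, 45 * HZ), i.e. (1/2)*base ... (3/2)*base as noted above.
 */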
 127
 128
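/* Forced garbage collection: walk every hash bucket under tbl->lock and
 * unlink entries that are unreferenced (refcnt == 1) and not NUD_PERMANENT.
 * Returns 1 if at least one entry was freed, 0 otherwise.
 */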
 129static int neigh_forced_gc(struct neigh_table *tbl)
 130{
 131	int shrunk = 0;
 132	int i;
 133	struct neigh_hash_table *nht;
 134
 135	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 136
 137	write_lock_bh(&tbl->lock);
 138	nht = rcu_dereference_protected(tbl->nht,
 139					lockdep_is_held(&tbl->lock));
 140	for (i = 0; i < (1 << nht->hash_shift); i++) {
 141		struct neighbour *n;
 142		struct neighbour __rcu **np;
 143
 144		np = &nht->hash_buckets[i];
 145		while ((n = rcu_dereference_protected(*np,
 146					lockdep_is_held(&tbl->lock))) != NULL) {
 147			/* Neighbour record may be discarded if:
 148			 * - nobody refers to it.
 149			 * - it is not permanent
 150			 */
 151			write_lock(&n->lock);
 152			if (atomic_read(&n->refcnt) == 1 &&
 153			    !(n->nud_state & NUD_PERMANENT)) {
 154				rcu_assign_pointer(*np,
 155					rcu_dereference_protected(n->next,
 156						  lockdep_is_held(&tbl->lock)));
 157				n->dead = 1;
 158				shrunk	= 1;
 159				write_unlock(&n->lock);
 160				neigh_cleanup_and_release(n);
 161				continue;
 162			}
 163			write_unlock(&n->lock);
 164			np = &n->next;
 165		}
 166	}
 167
 168	tbl->last_flush = jiffies;
 169
 170	write_unlock_bh(&tbl->lock);
 171
 172	return shrunk;
 173}
 174
 175static void neigh_add_timer(struct neighbour *n, unsigned long when)
 176{
 177	neigh_hold(n);
 178	if (unlikely(mod_timer(&n->timer, when))) {
 179		printk("NEIGH: BUG, double timer add, state is %x\n",
 180		       n->nud_state);
 181		dump_stack();
 182	}
 183}
 184
 185static int neigh_del_timer(struct neighbour *n)
 186{
 187	if ((n->nud_state & NUD_IN_TIMER) &&
 188	    del_timer(&n->timer)) {
 189		neigh_release(n);
 190		return 1;
 191	}
 192	return 0;
 193}
 194
 195static void pneigh_queue_purge(struct sk_buff_head *list)
 196{
 197	struct sk_buff *skb;
 198
 199	while ((skb = skb_dequeue(list)) != NULL) {
 200		dev_put(skb->dev);
 201		kfree_skb(skb);
 202	}
 203}
 204
 205static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 206{
 207	int i;
 208	struct neigh_hash_table *nht;
 209
 210	nht = rcu_dereference_protected(tbl->nht,
 211					lockdep_is_held(&tbl->lock));
 212
 213	for (i = 0; i < (1 << nht->hash_shift); i++) {
 214		struct neighbour *n;
 215		struct neighbour __rcu **np = &nht->hash_buckets[i];
 216
 217		while ((n = rcu_dereference_protected(*np,
 218					lockdep_is_held(&tbl->lock))) != NULL) {
 219			if (dev && n->dev != dev) {
 220				np = &n->next;
 221				continue;
 222			}
 223			rcu_assign_pointer(*np,
 224				   rcu_dereference_protected(n->next,
 225						lockdep_is_held(&tbl->lock)));
 226			write_lock(&n->lock);
 227			neigh_del_timer(n);
 228			n->dead = 1;
 229
 230			if (atomic_read(&n->refcnt) != 1) {
  231			/* The most unpleasant situation:
  232			   we must destroy the neighbour entry,
  233			   but someone still holds a reference to it.
  234
  235			   Destruction will be delayed until
  236			   the last user releases the entry, but
  237			   we must kill its timers etc. and move
  238			   it to a safe state.
  239			 */
 240				skb_queue_purge(&n->arp_queue);
 
 241				n->output = neigh_blackhole;
 242				if (n->nud_state & NUD_VALID)
 243					n->nud_state = NUD_NOARP;
 244				else
 245					n->nud_state = NUD_NONE;
 246				NEIGH_PRINTK2("neigh %p is stray.\n", n);
 247			}
 248			write_unlock(&n->lock);
 249			neigh_cleanup_and_release(n);
 250		}
 251	}
 252}
 253
 254void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 255{
 256	write_lock_bh(&tbl->lock);
 257	neigh_flush_dev(tbl, dev);
 258	write_unlock_bh(&tbl->lock);
 259}
 260EXPORT_SYMBOL(neigh_changeaddr);
 261
 262int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 263{
 264	write_lock_bh(&tbl->lock);
 265	neigh_flush_dev(tbl, dev);
 266	pneigh_ifdown(tbl, dev);
 267	write_unlock_bh(&tbl->lock);
 268
 269	del_timer_sync(&tbl->proxy_timer);
 270	pneigh_queue_purge(&tbl->proxy_queue);
 271	return 0;
 272}
 273EXPORT_SYMBOL(neigh_ifdown);
 274
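/* Allocate a new neighbour entry.  If the table already holds gc_thresh3
 * entries, or gc_thresh2 entries and the last flush was more than 5 seconds
 * ago, a forced GC is attempted first; the allocation fails (NULL) when the
 * forced GC frees nothing and the count was already at or above gc_thresh3.
 */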
 275static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 276{
 277	struct neighbour *n = NULL;
 278	unsigned long now = jiffies;
 279	int entries;
 280
 281	entries = atomic_inc_return(&tbl->entries) - 1;
 282	if (entries >= tbl->gc_thresh3 ||
 283	    (entries >= tbl->gc_thresh2 &&
 284	     time_after(now, tbl->last_flush + 5 * HZ))) {
 285		if (!neigh_forced_gc(tbl) &&
 286		    entries >= tbl->gc_thresh3)
 287			goto out_entries;
 
 288	}
 289
 290	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
 291	if (!n)
 292		goto out_entries;
 293
 294	skb_queue_head_init(&n->arp_queue);
 295	rwlock_init(&n->lock);
 296	seqlock_init(&n->ha_lock);
 297	n->updated	  = n->used = now;
 298	n->nud_state	  = NUD_NONE;
 299	n->output	  = neigh_blackhole;
 300	seqlock_init(&n->hh.hh_lock);
 301	n->parms	  = neigh_parms_clone(&tbl->parms);
 302	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
 303
 304	NEIGH_CACHE_STAT_INC(tbl, allocs);
 305	n->tbl		  = tbl;
 306	atomic_set(&n->refcnt, 1);
 307	n->dead		  = 1;
 308out:
 309	return n;
 310
 311out_entries:
 312	atomic_dec(&tbl->entries);
 313	goto out;
 314}
 315
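/* Allocate a bucket array of (1 << shift) entries: small arrays come from
 * kzalloc(), larger ones from whole zeroed pages.  hash_rnd is randomized
 * and forced odd.
 */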
 316static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 317{
 318	size_t size = (1 << shift) * sizeof(struct neighbour *);
 319	struct neigh_hash_table *ret;
 320	struct neighbour __rcu **buckets;
 
 321
 322	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 323	if (!ret)
 324		return NULL;
 325	if (size <= PAGE_SIZE)
 326		buckets = kzalloc(size, GFP_ATOMIC);
 327	else
 328		buckets = (struct neighbour __rcu **)
 329			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 330					   get_order(size));
 331	if (!buckets) {
 332		kfree(ret);
 333		return NULL;
 334	}
 335	ret->hash_buckets = buckets;
 336	ret->hash_shift = shift;
 337	get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
 338	ret->hash_rnd |= 1;
 339	return ret;
 340}
 341
 342static void neigh_hash_free_rcu(struct rcu_head *head)
 343{
 344	struct neigh_hash_table *nht = container_of(head,
 345						    struct neigh_hash_table,
 346						    rcu);
 347	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 348	struct neighbour __rcu **buckets = nht->hash_buckets;
 349
 350	if (size <= PAGE_SIZE)
 351		kfree(buckets);
 352	else
 353		free_pages((unsigned long)buckets, get_order(size));
 354	kfree(nht);
 355}
 356
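/* Rehash into a new table of (1 << new_shift) buckets.  Called with
 * tbl->lock held; readers keep using the old table via RCU until
 * neigh_hash_free_rcu() releases it.  On allocation failure the old
 * table is returned unchanged.
 */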
 357static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 358						unsigned long new_shift)
 359{
 360	unsigned int i, hash;
 361	struct neigh_hash_table *new_nht, *old_nht;
 362
 363	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 364
 365	old_nht = rcu_dereference_protected(tbl->nht,
 366					    lockdep_is_held(&tbl->lock));
 367	new_nht = neigh_hash_alloc(new_shift);
 368	if (!new_nht)
 369		return old_nht;
 370
 371	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 372		struct neighbour *n, *next;
 373
 374		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 375						   lockdep_is_held(&tbl->lock));
 376		     n != NULL;
 377		     n = next) {
 378			hash = tbl->hash(n->primary_key, n->dev,
 379					 new_nht->hash_rnd);
 380
 381			hash >>= (32 - new_nht->hash_shift);
 382			next = rcu_dereference_protected(n->next,
 383						lockdep_is_held(&tbl->lock));
 384
 385			rcu_assign_pointer(n->next,
 386					   rcu_dereference_protected(
 387						new_nht->hash_buckets[hash],
 388						lockdep_is_held(&tbl->lock)));
 389			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 390		}
 391	}
 392
 393	rcu_assign_pointer(tbl->nht, new_nht);
 394	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 395	return new_nht;
 396}
 397
 398struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 399			       struct net_device *dev)
 400{
 401	struct neighbour *n;
 402	int key_len = tbl->key_len;
 403	u32 hash_val;
 404	struct neigh_hash_table *nht;
 405
 406	NEIGH_CACHE_STAT_INC(tbl, lookups);
 407
 408	rcu_read_lock_bh();
 409	nht = rcu_dereference_bh(tbl->nht);
 410	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 411
 412	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 413	     n != NULL;
 414	     n = rcu_dereference_bh(n->next)) {
 415		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
 416			if (!atomic_inc_not_zero(&n->refcnt))
 417				n = NULL;
 418			NEIGH_CACHE_STAT_INC(tbl, hits);
 419			break;
 420		}
 421	}
 422
 423	rcu_read_unlock_bh();
 424	return n;
 425}
 426EXPORT_SYMBOL(neigh_lookup);
 427
 428struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 429				     const void *pkey)
 430{
 431	struct neighbour *n;
 432	int key_len = tbl->key_len;
 433	u32 hash_val;
 434	struct neigh_hash_table *nht;
 435
 436	NEIGH_CACHE_STAT_INC(tbl, lookups);
 437
 438	rcu_read_lock_bh();
 439	nht = rcu_dereference_bh(tbl->nht);
 440	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 441
 442	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 443	     n != NULL;
 444	     n = rcu_dereference_bh(n->next)) {
 445		if (!memcmp(n->primary_key, pkey, key_len) &&
 446		    net_eq(dev_net(n->dev), net)) {
 447			if (!atomic_inc_not_zero(&n->refcnt))
 448				n = NULL;
 449			NEIGH_CACHE_STAT_INC(tbl, hits);
 450			break;
 451		}
 452	}
 453
 454	rcu_read_unlock_bh();
 455	return n;
 456}
 457EXPORT_SYMBOL(neigh_lookup_nodev);
 458
 459struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
 460			       struct net_device *dev)
 461{
 462	u32 hash_val;
 463	int key_len = tbl->key_len;
 464	int error;
 465	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
 466	struct neigh_hash_table *nht;
 467
 468	if (!n) {
 469		rc = ERR_PTR(-ENOBUFS);
 470		goto out;
 471	}
 472
 473	memcpy(n->primary_key, pkey, key_len);
 474	n->dev = dev;
 475	dev_hold(dev);
 476
 477	/* Protocol specific setup. */
 478	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 479		rc = ERR_PTR(error);
 480		goto out_neigh_release;
 481	}
 482
 483	/* Device specific setup. */
 484	if (n->parms->neigh_setup &&
 485	    (error = n->parms->neigh_setup(n)) < 0) {
 486		rc = ERR_PTR(error);
 487		goto out_neigh_release;
 488	}
 489
 490	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
 491
 492	write_lock_bh(&tbl->lock);
 493	nht = rcu_dereference_protected(tbl->nht,
 494					lockdep_is_held(&tbl->lock));
 495
 496	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 497		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 498
 499	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 500
 501	if (n->parms->dead) {
 502		rc = ERR_PTR(-EINVAL);
 503		goto out_tbl_unlock;
 504	}
 505
 506	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 507					    lockdep_is_held(&tbl->lock));
 508	     n1 != NULL;
 509	     n1 = rcu_dereference_protected(n1->next,
 510			lockdep_is_held(&tbl->lock))) {
 511		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 512			neigh_hold(n1);
 
 513			rc = n1;
 514			goto out_tbl_unlock;
 515		}
 516	}
 517
 518	n->dead = 0;
 519	neigh_hold(n);
 
 520	rcu_assign_pointer(n->next,
 521			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 522						     lockdep_is_held(&tbl->lock)));
 523	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 524	write_unlock_bh(&tbl->lock);
 525	NEIGH_PRINTK2("neigh %p is created.\n", n);
 526	rc = n;
 527out:
 528	return rc;
 529out_tbl_unlock:
 530	write_unlock_bh(&tbl->lock);
 531out_neigh_release:
 532	neigh_release(n);
 533	goto out;
 534}
 535EXPORT_SYMBOL(neigh_create);
 536
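/* Hash for the proxy neighbour table: fold the last four bytes of the key
 * down to PNEIGH_HASHMASK + 1 (16) buckets.
 */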
 537static u32 pneigh_hash(const void *pkey, int key_len)
 538{
 539	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 540	hash_val ^= (hash_val >> 16);
 541	hash_val ^= hash_val >> 8;
 542	hash_val ^= hash_val >> 4;
 543	hash_val &= PNEIGH_HASHMASK;
 544	return hash_val;
 545}
 546
 547static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 548					      struct net *net,
 549					      const void *pkey,
 550					      int key_len,
 551					      struct net_device *dev)
 552{
 553	while (n) {
 554		if (!memcmp(n->key, pkey, key_len) &&
 555		    net_eq(pneigh_net(n), net) &&
 556		    (n->dev == dev || !n->dev))
 557			return n;
 558		n = n->next;
 559	}
 560	return NULL;
 561}
 562
 563struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 564		struct net *net, const void *pkey, struct net_device *dev)
 565{
 566	int key_len = tbl->key_len;
 567	u32 hash_val = pneigh_hash(pkey, key_len);
 568
 569	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 570				 net, pkey, key_len, dev);
 571}
 572EXPORT_SYMBOL_GPL(__pneigh_lookup);
 573
 574struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 575				    struct net *net, const void *pkey,
 576				    struct net_device *dev, int creat)
 577{
 578	struct pneigh_entry *n;
 579	int key_len = tbl->key_len;
 580	u32 hash_val = pneigh_hash(pkey, key_len);
 581
 582	read_lock_bh(&tbl->lock);
 583	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 584			      net, pkey, key_len, dev);
 585	read_unlock_bh(&tbl->lock);
 586
 587	if (n || !creat)
 588		goto out;
 589
 590	ASSERT_RTNL();
 591
 592	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 593	if (!n)
 594		goto out;
 595
 596	write_pnet(&n->net, hold_net(net));
 597	memcpy(n->key, pkey, key_len);
 598	n->dev = dev;
 599	if (dev)
 600		dev_hold(dev);
 601
 602	if (tbl->pconstructor && tbl->pconstructor(n)) {
 603		if (dev)
 604			dev_put(dev);
 605		release_net(net);
 606		kfree(n);
 607		n = NULL;
 608		goto out;
 609	}
 610
 611	write_lock_bh(&tbl->lock);
 612	n->next = tbl->phash_buckets[hash_val];
 613	tbl->phash_buckets[hash_val] = n;
 614	write_unlock_bh(&tbl->lock);
 615out:
 616	return n;
 617}
 618EXPORT_SYMBOL(pneigh_lookup);
 619
 620
 621int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 622		  struct net_device *dev)
 623{
 624	struct pneigh_entry *n, **np;
 625	int key_len = tbl->key_len;
 626	u32 hash_val = pneigh_hash(pkey, key_len);
 627
 628	write_lock_bh(&tbl->lock);
 629	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 630	     np = &n->next) {
 631		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 632		    net_eq(pneigh_net(n), net)) {
 633			*np = n->next;
 634			write_unlock_bh(&tbl->lock);
 635			if (tbl->pdestructor)
 636				tbl->pdestructor(n);
 637			if (n->dev)
 638				dev_put(n->dev);
 639			release_net(pneigh_net(n));
 640			kfree(n);
 641			return 0;
 642		}
 643	}
 644	write_unlock_bh(&tbl->lock);
 645	return -ENOENT;
 646}
 647
 648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 649{
 650	struct pneigh_entry *n, **np;
 651	u32 h;
 652
 653	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 654		np = &tbl->phash_buckets[h];
 655		while ((n = *np) != NULL) {
 656			if (!dev || n->dev == dev) {
 657				*np = n->next;
 658				if (tbl->pdestructor)
 659					tbl->pdestructor(n);
 660				if (n->dev)
 661					dev_put(n->dev);
 662				release_net(pneigh_net(n));
 663				kfree(n);
 664				continue;
 665			}
 666			np = &n->next;
 667		}
 668	}
 669	return -ENOENT;
 670}
 671
 672static void neigh_parms_destroy(struct neigh_parms *parms);
 673
 674static inline void neigh_parms_put(struct neigh_parms *parms)
 675{
 676	if (atomic_dec_and_test(&parms->refcnt))
 677		neigh_parms_destroy(parms);
 678}
 679
 680static void neigh_destroy_rcu(struct rcu_head *head)
 681{
 682	struct neighbour *neigh = container_of(head, struct neighbour, rcu);
 683
 684	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
 685}
  686/*
  687 *	The neighbour must already have been removed from the table.
  688 *
  689 */
 690void neigh_destroy(struct neighbour *neigh)
 691{
 692	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 693
 694	if (!neigh->dead) {
 695		printk(KERN_WARNING
 696		       "Destroying alive neighbour %p\n", neigh);
 697		dump_stack();
 698		return;
 699	}
 700
 701	if (neigh_del_timer(neigh))
 702		printk(KERN_WARNING "Impossible event.\n");
 703
 704	skb_queue_purge(&neigh->arp_queue);
 
 705
 706	dev_put(neigh->dev);
 707	neigh_parms_put(neigh->parms);
 708
 709	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 710
 711	atomic_dec(&neigh->tbl->entries);
 712	call_rcu(&neigh->rcu, neigh_destroy_rcu);
 713}
 714EXPORT_SYMBOL(neigh_destroy);
 715
 716/* Neighbour state is suspicious;
 717   disable fast path.
 718
 719   Called with write_locked neigh.
 720 */
 721static void neigh_suspect(struct neighbour *neigh)
 722{
 723	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
 724
 725	neigh->output = neigh->ops->output;
 726}
 727
 728/* Neighbour state is OK;
 729   enable fast path.
 730
 731   Called with write_locked neigh.
 732 */
 733static void neigh_connect(struct neighbour *neigh)
 734{
 735	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
 736
 737	neigh->output = neigh->ops->connected_output;
 738}
 739
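/* Periodic garbage collection, run as deferrable delayed work.  It
 * recomputes reachable_time for each parms block at most once every 300
 * seconds, drops unreferenced entries that are NUD_FAILED or unused for
 * longer than gc_staletime, and reschedules itself every
 * base_reachable_time/2.
 */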
 740static void neigh_periodic_work(struct work_struct *work)
 741{
 742	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 743	struct neighbour *n;
 744	struct neighbour __rcu **np;
 745	unsigned int i;
 746	struct neigh_hash_table *nht;
 747
 748	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 749
 750	write_lock_bh(&tbl->lock);
 751	nht = rcu_dereference_protected(tbl->nht,
 752					lockdep_is_held(&tbl->lock));
 753
 754	/*
 755	 *	periodically recompute ReachableTime from random function
 756	 */
 757
 758	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 759		struct neigh_parms *p;
 760		tbl->last_rand = jiffies;
 761		for (p = &tbl->parms; p; p = p->next)
 762			p->reachable_time =
 763				neigh_rand_reach_time(p->base_reachable_time);
 764	}
 765
 766	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 767		np = &nht->hash_buckets[i];
 768
 769		while ((n = rcu_dereference_protected(*np,
 770				lockdep_is_held(&tbl->lock))) != NULL) {
 771			unsigned int state;
 772
 773			write_lock(&n->lock);
 774
 775			state = n->nud_state;
 776			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
 777				write_unlock(&n->lock);
 778				goto next_elt;
 779			}
 780
 781			if (time_before(n->used, n->confirmed))
 782				n->used = n->confirmed;
 783
 784			if (atomic_read(&n->refcnt) == 1 &&
 785			    (state == NUD_FAILED ||
 786			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
 787				*np = n->next;
 788				n->dead = 1;
 789				write_unlock(&n->lock);
 790				neigh_cleanup_and_release(n);
 791				continue;
 792			}
 793			write_unlock(&n->lock);
 794
 795next_elt:
 796			np = &n->next;
 797		}
 798		/*
 799		 * It's fine to release lock here, even if hash table
 800		 * grows while we are preempted.
 801		 */
 802		write_unlock_bh(&tbl->lock);
 803		cond_resched();
 804		write_lock_bh(&tbl->lock);
 805	}
 806	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 807	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 808	 * base_reachable_time.
 809	 */
 810	schedule_delayed_work(&tbl->gc_work,
 811			      tbl->parms.base_reachable_time >> 1);
 812	write_unlock_bh(&tbl->lock);
 813}
 814
 815static __inline__ int neigh_max_probes(struct neighbour *n)
 816{
 817	struct neigh_parms *p = n->parms;
 818	return (n->nud_state & NUD_PROBE) ?
 819		p->ucast_probes :
 820		p->ucast_probes + p->app_probes + p->mcast_probes;
 821}
 822
 823static void neigh_invalidate(struct neighbour *neigh)
 824	__releases(neigh->lock)
 825	__acquires(neigh->lock)
 826{
 827	struct sk_buff *skb;
 828
 829	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 830	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
 831	neigh->updated = jiffies;
 832
  833	/* This is a very delicate spot. report_unreachable is a very
  834	   complicated routine. In particular, it can hit the same neighbour entry!
  835
  836	   So we try to be careful and avoid an endless loop. --ANK
  837	 */
 838	while (neigh->nud_state == NUD_FAILED &&
 839	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 840		write_unlock(&neigh->lock);
 841		neigh->ops->error_report(neigh, skb);
 842		write_lock(&neigh->lock);
 843	}
 844	skb_queue_purge(&neigh->arp_queue);
 845}
 846
 847/* Called when a timer expires for a neighbour entry. */
 848
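/* Transitions driven by this timer:
 *   NUD_REACHABLE -> NUD_DELAY or NUD_STALE once the confirmation ages out,
 *   NUD_DELAY     -> NUD_REACHABLE (if reconfirmed) or NUD_PROBE,
 *   NUD_PROBE / NUD_INCOMPLETE -> NUD_FAILED after neigh_max_probes()
 *   unanswered solicitations.
 */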
 849static void neigh_timer_handler(unsigned long arg)
 850{
 851	unsigned long now, next;
 852	struct neighbour *neigh = (struct neighbour *)arg;
 853	unsigned state;
 854	int notify = 0;
 855
 856	write_lock(&neigh->lock);
 857
 858	state = neigh->nud_state;
 859	now = jiffies;
 860	next = now + HZ;
 861
 862	if (!(state & NUD_IN_TIMER)) {
 863#ifndef CONFIG_SMP
 864		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
 865#endif
 866		goto out;
 867	}
 868
 869	if (state & NUD_REACHABLE) {
 870		if (time_before_eq(now,
 871				   neigh->confirmed + neigh->parms->reachable_time)) {
 872			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
 873			next = neigh->confirmed + neigh->parms->reachable_time;
 874		} else if (time_before_eq(now,
 875					  neigh->used + neigh->parms->delay_probe_time)) {
 876			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
 
 877			neigh->nud_state = NUD_DELAY;
 878			neigh->updated = jiffies;
 879			neigh_suspect(neigh);
 880			next = now + neigh->parms->delay_probe_time;
 881		} else {
 882			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
 883			neigh->nud_state = NUD_STALE;
 884			neigh->updated = jiffies;
 885			neigh_suspect(neigh);
 886			notify = 1;
 887		}
 888	} else if (state & NUD_DELAY) {
 889		if (time_before_eq(now,
 890				   neigh->confirmed + neigh->parms->delay_probe_time)) {
 891			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
 
 892			neigh->nud_state = NUD_REACHABLE;
 893			neigh->updated = jiffies;
 894			neigh_connect(neigh);
 895			notify = 1;
 896			next = neigh->confirmed + neigh->parms->reachable_time;
 897		} else {
 898			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
 899			neigh->nud_state = NUD_PROBE;
 900			neigh->updated = jiffies;
 901			atomic_set(&neigh->probes, 0);
 902			next = now + neigh->parms->retrans_time;
 
 903		}
 904	} else {
 905		/* NUD_PROBE|NUD_INCOMPLETE */
 906		next = now + neigh->parms->retrans_time;
 907	}
 908
 909	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 910	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 911		neigh->nud_state = NUD_FAILED;
 912		notify = 1;
 913		neigh_invalidate(neigh);
 
 914	}
 915
 916	if (neigh->nud_state & NUD_IN_TIMER) {
 917		if (time_before(next, jiffies + HZ/2))
 918			next = jiffies + HZ/2;
 919		if (!mod_timer(&neigh->timer, next))
 920			neigh_hold(neigh);
 921	}
 922	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 923		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
 924		/* keep skb alive even if arp_queue overflows */
 925		if (skb)
 926			skb = skb_copy(skb, GFP_ATOMIC);
 927		write_unlock(&neigh->lock);
 928		neigh->ops->solicit(neigh, skb);
 929		atomic_inc(&neigh->probes);
 930		kfree_skb(skb);
 931	} else {
 932out:
 933		write_unlock(&neigh->lock);
 934	}
 935
 936	if (notify)
 937		neigh_update_notify(neigh);
 938
 939	neigh_release(neigh);
 940}
 941
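/* Called (via neigh_event_send()) when a packet needs a neighbour that is
 * not yet resolved.  Starts resolution (NUD_NONE -> NUD_INCOMPLETE, or
 * NUD_STALE -> NUD_DELAY) and queues the skb on arp_queue, bounded by
 * parms->queue_len.  Returns 0 when the caller may transmit immediately,
 * 1 when the skb has been taken over (queued pending resolution, or freed
 * because resolution failed at once).
 */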
 942int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 943{
 944	int rc;
 945	unsigned long now;
 946
 947	write_lock_bh(&neigh->lock);
 948
 949	rc = 0;
 950	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 951		goto out_unlock_bh;
 952
 953	now = jiffies;
 954
 955	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 956		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
 957			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
 958			neigh->nud_state     = NUD_INCOMPLETE;
 959			neigh->updated = jiffies;
 960			neigh_add_timer(neigh, now + 1);
 961		} else {
 962			neigh->nud_state = NUD_FAILED;
 963			neigh->updated = jiffies;
 964			write_unlock_bh(&neigh->lock);
 965
 966			kfree_skb(skb);
 967			return 1;
 968		}
 969	} else if (neigh->nud_state & NUD_STALE) {
 970		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
 971		neigh->nud_state = NUD_DELAY;
 972		neigh->updated = jiffies;
 973		neigh_add_timer(neigh,
 974				jiffies + neigh->parms->delay_probe_time);
 975	}
 976
 977	if (neigh->nud_state == NUD_INCOMPLETE) {
 978		if (skb) {
 979			if (skb_queue_len(&neigh->arp_queue) >=
 980			    neigh->parms->queue_len) {
 981				struct sk_buff *buff;
 
 982				buff = __skb_dequeue(&neigh->arp_queue);
 983				kfree_skb(buff);
 984				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 985			}
 986			skb_dst_force(skb);
 987			__skb_queue_tail(&neigh->arp_queue, skb);
 
 988		}
 989		rc = 1;
 990	}
 991out_unlock_bh:
 992	write_unlock_bh(&neigh->lock);
 993	return rc;
 994}
 995EXPORT_SYMBOL(__neigh_event_send);
 996
 997static void neigh_update_hhs(struct neighbour *neigh)
 998{
 999	struct hh_cache *hh;
1000	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1001		= NULL;
1002
1003	if (neigh->dev->header_ops)
1004		update = neigh->dev->header_ops->cache_update;
1005
1006	if (update) {
1007		hh = &neigh->hh;
1008		if (hh->hh_len) {
1009			write_seqlock_bh(&hh->hh_lock);
1010			update(hh, neigh->dev, neigh->ha);
1011			write_sequnlock_bh(&hh->hh_lock);
1012		}
1013	}
1014}
1015
1016
1017
 1018/* Generic update routine.
 1019   -- lladdr is the new lladdr, or NULL if it is not supplied.
 1020   -- new    is the new state.
 1021   -- flags
 1022	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
 1023				if it is different.
 1024	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
 1025				lladdr instead of overriding it
 1026				if it is different.
 1027				It also allows the current state to be retained
 1028				if the lladdr is unchanged.
 1029	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1030
 1031	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
 1032				NTF_ROUTER flag.
 1033	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to be
 1034				a router.
 1035
 1036   The caller MUST hold a reference count on the entry.
 1037 */
1038
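/* For example, the administrative delete path (see neigh_delete() below)
 * calls
 *	neigh_update(neigh, NULL, NUD_FAILED,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 * which invalidates the entry regardless of its current state.
 */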
1039int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1040		 u32 flags)
1041{
1042	u8 old;
1043	int err;
1044	int notify = 0;
1045	struct net_device *dev;
1046	int update_isrouter = 0;
1047
1048	write_lock_bh(&neigh->lock);
1049
1050	dev    = neigh->dev;
1051	old    = neigh->nud_state;
1052	err    = -EPERM;
1053
1054	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1055	    (old & (NUD_NOARP | NUD_PERMANENT)))
1056		goto out;
1057
1058	if (!(new & NUD_VALID)) {
1059		neigh_del_timer(neigh);
1060		if (old & NUD_CONNECTED)
1061			neigh_suspect(neigh);
1062		neigh->nud_state = new;
1063		err = 0;
1064		notify = old & NUD_VALID;
1065		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1066		    (new & NUD_FAILED)) {
1067			neigh_invalidate(neigh);
1068			notify = 1;
1069		}
1070		goto out;
1071	}
1072
1073	/* Compare new lladdr with cached one */
1074	if (!dev->addr_len) {
1075		/* First case: device needs no address. */
1076		lladdr = neigh->ha;
1077	} else if (lladdr) {
1078		/* The second case: if something is already cached
1079		   and a new address is proposed:
1080		   - compare new & old
1081		   - if they are different, check override flag
1082		 */
1083		if ((old & NUD_VALID) &&
1084		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1085			lladdr = neigh->ha;
1086	} else {
1087		/* No address is supplied; if we know something,
1088		   use it, otherwise discard the request.
1089		 */
1090		err = -EINVAL;
1091		if (!(old & NUD_VALID))
1092			goto out;
1093		lladdr = neigh->ha;
1094	}
1095
1096	if (new & NUD_CONNECTED)
1097		neigh->confirmed = jiffies;
1098	neigh->updated = jiffies;
1099
 1100	/* If the entry was valid and the address has not changed,
 1101	   do not change the entry state if the new one is STALE.
 1102	 */
1103	err = 0;
1104	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1105	if (old & NUD_VALID) {
1106		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1107			update_isrouter = 0;
1108			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1109			    (old & NUD_CONNECTED)) {
1110				lladdr = neigh->ha;
1111				new = NUD_STALE;
1112			} else
1113				goto out;
1114		} else {
1115			if (lladdr == neigh->ha && new == NUD_STALE &&
1116			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1117			     (old & NUD_CONNECTED))
1118			    )
1119				new = old;
1120		}
1121	}
1122
1123	if (new != old) {
1124		neigh_del_timer(neigh);
1125		if (new & NUD_IN_TIMER)
1126			neigh_add_timer(neigh, (jiffies +
1127						((new & NUD_REACHABLE) ?
1128						 neigh->parms->reachable_time :
1129						 0)));
1130		neigh->nud_state = new;
 
1131	}
1132
1133	if (lladdr != neigh->ha) {
1134		write_seqlock(&neigh->ha_lock);
1135		memcpy(&neigh->ha, lladdr, dev->addr_len);
1136		write_sequnlock(&neigh->ha_lock);
1137		neigh_update_hhs(neigh);
1138		if (!(new & NUD_CONNECTED))
1139			neigh->confirmed = jiffies -
1140				      (neigh->parms->base_reachable_time << 1);
1141		notify = 1;
1142	}
1143	if (new == old)
1144		goto out;
1145	if (new & NUD_CONNECTED)
1146		neigh_connect(neigh);
1147	else
1148		neigh_suspect(neigh);
1149	if (!(old & NUD_VALID)) {
1150		struct sk_buff *skb;
1151
1152		/* Again: avoid dead loop if something went wrong */
1153
1154		while (neigh->nud_state & NUD_VALID &&
1155		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1156			struct dst_entry *dst = skb_dst(skb);
1157			struct neighbour *n2, *n1 = neigh;
1158			write_unlock_bh(&neigh->lock);
1159			/* On shaper/eql skb->dst->neighbour != neigh :( */
1160			if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1161				n1 = n2;
1162			n1->output(n1, skb);
1163			write_lock_bh(&neigh->lock);
1164		}
1165		skb_queue_purge(&neigh->arp_queue);
 
1166	}
1167out:
1168	if (update_isrouter) {
1169		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1170			(neigh->flags | NTF_ROUTER) :
1171			(neigh->flags & ~NTF_ROUTER);
1172	}
1173	write_unlock_bh(&neigh->lock);
1174
1175	if (notify)
1176		neigh_update_notify(neigh);
1177
1178	return err;
1179}
1180EXPORT_SYMBOL(neigh_update);
1181
1182struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1183				 u8 *lladdr, void *saddr,
1184				 struct net_device *dev)
1185{
1186	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1187						 lladdr || !dev->addr_len);
1188	if (neigh)
1189		neigh_update(neigh, lladdr, NUD_STALE,
1190			     NEIGH_UPDATE_F_OVERRIDE);
1191	return neigh;
1192}
1193EXPORT_SYMBOL(neigh_event_ns);
1194
1195/* called with read_lock_bh(&n->lock); */
1196static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1197{
1198	struct net_device *dev = dst->dev;
1199	__be16 prot = dst->ops->protocol;
1200	struct hh_cache	*hh = &n->hh;
1201
1202	write_lock_bh(&n->lock);
1203
1204	/* Only one thread can come in here and initialize the
1205	 * hh_cache entry.
1206	 */
1207	if (!hh->hh_len)
1208		dev->header_ops->cache(n, hh, prot);
1209
1210	write_unlock_bh(&n->lock);
1211}
1212
 1213/* This function can be used in contexts where only the old dev_queue_xmit
 1214 * worked, e.g. if you want to override the normal output path (eql, shaper),
 1215 * but resolution has not been made yet.
 1216 */
1217
1218int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1219{
1220	struct net_device *dev = skb->dev;
1221
1222	__skb_pull(skb, skb_network_offset(skb));
1223
1224	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1225			    skb->len) < 0 &&
1226	    dev->header_ops->rebuild(skb))
1227		return 0;
1228
1229	return dev_queue_xmit(skb);
1230}
1231EXPORT_SYMBOL(neigh_compat_output);
1232
1233/* Slow and careful. */
1234
1235int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1236{
1237	struct dst_entry *dst = skb_dst(skb);
1238	int rc = 0;
1239
1240	if (!dst)
1241		goto discard;
1242
1243	__skb_pull(skb, skb_network_offset(skb));
1244
1245	if (!neigh_event_send(neigh, skb)) {
1246		int err;
1247		struct net_device *dev = neigh->dev;
1248		unsigned int seq;
1249
1250		if (dev->header_ops->cache && !neigh->hh.hh_len)
1251			neigh_hh_init(neigh, dst);
1252
1253		do {
1254			seq = read_seqbegin(&neigh->ha_lock);
1255			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1256					      neigh->ha, NULL, skb->len);
1257		} while (read_seqretry(&neigh->ha_lock, seq));
1258
1259		if (err >= 0)
1260			rc = dev_queue_xmit(skb);
1261		else
1262			goto out_kfree_skb;
1263	}
1264out:
1265	return rc;
1266discard:
1267	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1268		      dst, neigh);
1269out_kfree_skb:
1270	rc = -EINVAL;
1271	kfree_skb(skb);
1272	goto out;
1273}
1274EXPORT_SYMBOL(neigh_resolve_output);
1275
1276/* As fast as possible without hh cache */
1277
1278int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1279{
1280	struct net_device *dev = neigh->dev;
1281	unsigned int seq;
1282	int err;
1283
1284	__skb_pull(skb, skb_network_offset(skb));
1285
1286	do {
1287		seq = read_seqbegin(&neigh->ha_lock);
1288		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1289				      neigh->ha, NULL, skb->len);
1290	} while (read_seqretry(&neigh->ha_lock, seq));
1291
1292	if (err >= 0)
1293		err = dev_queue_xmit(skb);
1294	else {
1295		err = -EINVAL;
1296		kfree_skb(skb);
1297	}
1298	return err;
1299}
1300EXPORT_SYMBOL(neigh_connected_output);
1301
1302int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1303{
1304	return dev_queue_xmit(skb);
1305}
1306EXPORT_SYMBOL(neigh_direct_output);
1307
1308static void neigh_proxy_process(unsigned long arg)
1309{
1310	struct neigh_table *tbl = (struct neigh_table *)arg;
1311	long sched_next = 0;
1312	unsigned long now = jiffies;
1313	struct sk_buff *skb, *n;
1314
1315	spin_lock(&tbl->proxy_queue.lock);
1316
1317	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1318		long tdif = NEIGH_CB(skb)->sched_next - now;
1319
1320		if (tdif <= 0) {
1321			struct net_device *dev = skb->dev;
1322
1323			__skb_unlink(skb, &tbl->proxy_queue);
1324			if (tbl->proxy_redo && netif_running(dev)) {
1325				rcu_read_lock();
1326				tbl->proxy_redo(skb);
1327				rcu_read_unlock();
1328			} else {
1329				kfree_skb(skb);
1330			}
1331
1332			dev_put(dev);
1333		} else if (!sched_next || tdif < sched_next)
1334			sched_next = tdif;
1335	}
1336	del_timer(&tbl->proxy_timer);
1337	if (sched_next)
1338		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1339	spin_unlock(&tbl->proxy_queue.lock);
1340}
1341
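/* Queue a proxied request for deferred handling by neigh_proxy_process().
 * Delivery is delayed by a random amount of up to p->proxy_delay jiffies;
 * the skb is dropped if proxy_queue already holds more than p->proxy_qlen
 * packets.
 */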
1342void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1343		    struct sk_buff *skb)
1344{
1345	unsigned long now = jiffies;
1346	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1347
1348	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1349		kfree_skb(skb);
1350		return;
1351	}
1352
1353	NEIGH_CB(skb)->sched_next = sched_next;
1354	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1355
1356	spin_lock(&tbl->proxy_queue.lock);
1357	if (del_timer(&tbl->proxy_timer)) {
1358		if (time_before(tbl->proxy_timer.expires, sched_next))
1359			sched_next = tbl->proxy_timer.expires;
1360	}
1361	skb_dst_drop(skb);
1362	dev_hold(skb->dev);
1363	__skb_queue_tail(&tbl->proxy_queue, skb);
1364	mod_timer(&tbl->proxy_timer, sched_next);
1365	spin_unlock(&tbl->proxy_queue.lock);
1366}
1367EXPORT_SYMBOL(pneigh_enqueue);
1368
1369static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1370						      struct net *net, int ifindex)
1371{
1372	struct neigh_parms *p;
1373
1374	for (p = &tbl->parms; p; p = p->next) {
1375		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1376		    (!p->dev && !ifindex))
1377			return p;
1378	}
1379
1380	return NULL;
1381}
1382
1383struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1384				      struct neigh_table *tbl)
1385{
1386	struct neigh_parms *p, *ref;
1387	struct net *net = dev_net(dev);
1388	const struct net_device_ops *ops = dev->netdev_ops;
1389
1390	ref = lookup_neigh_parms(tbl, net, 0);
1391	if (!ref)
1392		return NULL;
1393
1394	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1395	if (p) {
1396		p->tbl		  = tbl;
1397		atomic_set(&p->refcnt, 1);
1398		p->reachable_time =
1399				neigh_rand_reach_time(p->base_reachable_time);
1400
1401		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1402			kfree(p);
1403			return NULL;
1404		}
1405
1406		dev_hold(dev);
1407		p->dev = dev;
1408		write_pnet(&p->net, hold_net(net));
1409		p->sysctl_table = NULL;
1410		write_lock_bh(&tbl->lock);
1411		p->next		= tbl->parms.next;
1412		tbl->parms.next = p;
1413		write_unlock_bh(&tbl->lock);
1414	}
1415	return p;
1416}
1417EXPORT_SYMBOL(neigh_parms_alloc);
1418
1419static void neigh_rcu_free_parms(struct rcu_head *head)
1420{
1421	struct neigh_parms *parms =
1422		container_of(head, struct neigh_parms, rcu_head);
1423
1424	neigh_parms_put(parms);
1425}
1426
1427void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1428{
1429	struct neigh_parms **p;
1430
1431	if (!parms || parms == &tbl->parms)
1432		return;
1433	write_lock_bh(&tbl->lock);
1434	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1435		if (*p == parms) {
1436			*p = parms->next;
1437			parms->dead = 1;
1438			write_unlock_bh(&tbl->lock);
1439			if (parms->dev)
1440				dev_put(parms->dev);
1441			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1442			return;
1443		}
1444	}
1445	write_unlock_bh(&tbl->lock);
1446	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1447}
1448EXPORT_SYMBOL(neigh_parms_release);
1449
1450static void neigh_parms_destroy(struct neigh_parms *parms)
1451{
1452	release_net(neigh_parms_net(parms));
1453	kfree(parms);
1454}
1455
1456static struct lock_class_key neigh_table_proxy_queue_class;
1457
1458void neigh_table_init_no_netlink(struct neigh_table *tbl)
1459{
1460	unsigned long now = jiffies;
1461	unsigned long phsize;
1462
1463	write_pnet(&tbl->parms.net, &init_net);
1464	atomic_set(&tbl->parms.refcnt, 1);
1465	tbl->parms.reachable_time =
1466			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1467
1468	if (!tbl->kmem_cachep)
1469		tbl->kmem_cachep =
1470			kmem_cache_create(tbl->id, tbl->entry_size, 0,
1471					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1472					  NULL);
1473	tbl->stats = alloc_percpu(struct neigh_statistics);
1474	if (!tbl->stats)
1475		panic("cannot create neighbour cache statistics");
1476
1477#ifdef CONFIG_PROC_FS
1478	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1479			      &neigh_stat_seq_fops, tbl))
1480		panic("cannot create neighbour proc dir entry");
1481#endif
1482
1483	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1484
1485	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1486	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1487
1488	if (!tbl->nht || !tbl->phash_buckets)
1489		panic("cannot allocate neighbour cache hashes");
1490
1491	rwlock_init(&tbl->lock);
1492	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1493	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1494	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
 
1495	skb_queue_head_init_class(&tbl->proxy_queue,
1496			&neigh_table_proxy_queue_class);
1497
1498	tbl->last_flush = now;
1499	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1500}
1501EXPORT_SYMBOL(neigh_table_init_no_netlink);
1502
1503void neigh_table_init(struct neigh_table *tbl)
1504{
1505	struct neigh_table *tmp;
1506
1507	neigh_table_init_no_netlink(tbl);
1508	write_lock(&neigh_tbl_lock);
1509	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1510		if (tmp->family == tbl->family)
1511			break;
1512	}
1513	tbl->next	= neigh_tables;
1514	neigh_tables	= tbl;
1515	write_unlock(&neigh_tbl_lock);
1516
1517	if (unlikely(tmp)) {
1518		printk(KERN_ERR "NEIGH: Registering multiple tables for "
1519		       "family %d\n", tbl->family);
1520		dump_stack();
1521	}
1522}
1523EXPORT_SYMBOL(neigh_table_init);
1524
1525int neigh_table_clear(struct neigh_table *tbl)
1526{
1527	struct neigh_table **tp;
1528
1529	/* It is not clean... Fix it to unload IPv6 module safely */
1530	cancel_delayed_work_sync(&tbl->gc_work);
1531	del_timer_sync(&tbl->proxy_timer);
1532	pneigh_queue_purge(&tbl->proxy_queue);
1533	neigh_ifdown(tbl, NULL);
1534	if (atomic_read(&tbl->entries))
1535		printk(KERN_CRIT "neighbour leakage\n");
1536	write_lock(&neigh_tbl_lock);
1537	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1538		if (*tp == tbl) {
1539			*tp = tbl->next;
1540			break;
1541		}
1542	}
1543	write_unlock(&neigh_tbl_lock);
1544
1545	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1546		 neigh_hash_free_rcu);
1547	tbl->nht = NULL;
1548
1549	kfree(tbl->phash_buckets);
1550	tbl->phash_buckets = NULL;
1551
1552	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1553
1554	free_percpu(tbl->stats);
1555	tbl->stats = NULL;
1556
1557	kmem_cache_destroy(tbl->kmem_cachep);
1558	tbl->kmem_cachep = NULL;
1559
1560	return 0;
1561}
1562EXPORT_SYMBOL(neigh_table_clear);
1563
1564static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1565{
1566	struct net *net = sock_net(skb->sk);
1567	struct ndmsg *ndm;
1568	struct nlattr *dst_attr;
1569	struct neigh_table *tbl;
 
1570	struct net_device *dev = NULL;
1571	int err = -EINVAL;
1572
1573	ASSERT_RTNL();
1574	if (nlmsg_len(nlh) < sizeof(*ndm))
1575		goto out;
1576
1577	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1578	if (dst_attr == NULL)
1579		goto out;
1580
1581	ndm = nlmsg_data(nlh);
1582	if (ndm->ndm_ifindex) {
1583		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1584		if (dev == NULL) {
1585			err = -ENODEV;
1586			goto out;
1587		}
1588	}
1589
1590	read_lock(&neigh_tbl_lock);
1591	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1592		struct neighbour *neigh;
1593
1594		if (tbl->family != ndm->ndm_family)
1595			continue;
1596		read_unlock(&neigh_tbl_lock);
1597
1598		if (nla_len(dst_attr) < tbl->key_len)
1599			goto out;
1600
1601		if (ndm->ndm_flags & NTF_PROXY) {
1602			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1603			goto out;
1604		}
1605
1606		if (dev == NULL)
1607			goto out;
1608
1609		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1610		if (neigh == NULL) {
1611			err = -ENOENT;
1612			goto out;
1613		}
1614
1615		err = neigh_update(neigh, NULL, NUD_FAILED,
1616				   NEIGH_UPDATE_F_OVERRIDE |
1617				   NEIGH_UPDATE_F_ADMIN);
1618		neigh_release(neigh);
1619		goto out;
1620	}
1621	read_unlock(&neigh_tbl_lock);
1622	err = -EAFNOSUPPORT;
1623
1624out:
1625	return err;
1626}
1627
1628static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1629{
1630	struct net *net = sock_net(skb->sk);
1631	struct ndmsg *ndm;
1632	struct nlattr *tb[NDA_MAX+1];
1633	struct neigh_table *tbl;
1634	struct net_device *dev = NULL;
1635	int err;
1636
1637	ASSERT_RTNL();
1638	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1639	if (err < 0)
1640		goto out;
1641
1642	err = -EINVAL;
1643	if (tb[NDA_DST] == NULL)
1644		goto out;
1645
1646	ndm = nlmsg_data(nlh);
1647	if (ndm->ndm_ifindex) {
1648		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1649		if (dev == NULL) {
1650			err = -ENODEV;
1651			goto out;
1652		}
1653
1654		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1655			goto out;
1656	}
1657
1658	read_lock(&neigh_tbl_lock);
1659	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1660		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1661		struct neighbour *neigh;
1662		void *dst, *lladdr;
1663
1664		if (tbl->family != ndm->ndm_family)
1665			continue;
1666		read_unlock(&neigh_tbl_lock);
 
1667
1668		if (nla_len(tb[NDA_DST]) < tbl->key_len)
1669			goto out;
1670		dst = nla_data(tb[NDA_DST]);
1671		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1672
1673		if (ndm->ndm_flags & NTF_PROXY) {
1674			struct pneigh_entry *pn;
1675
1676			err = -ENOBUFS;
1677			pn = pneigh_lookup(tbl, net, dst, dev, 1);
1678			if (pn) {
1679				pn->flags = ndm->ndm_flags;
1680				err = 0;
1681			}
 
1682			goto out;
1683		}
1684
1685		if (dev == NULL)
1686			goto out;
1687
1688		neigh = neigh_lookup(tbl, dst, dev);
1689		if (neigh == NULL) {
1690			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1691				err = -ENOENT;
1692				goto out;
1693			}
1694
1695			neigh = __neigh_lookup_errno(tbl, dst, dev);
1696			if (IS_ERR(neigh)) {
1697				err = PTR_ERR(neigh);
1698				goto out;
1699			}
1700		} else {
1701			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1702				err = -EEXIST;
1703				neigh_release(neigh);
1704				goto out;
1705			}
1706
1707			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1708				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1709		}
1710
1711		if (ndm->ndm_flags & NTF_USE) {
1712			neigh_event_send(neigh, NULL);
1713			err = 0;
1714		} else
1715			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1716		neigh_release(neigh);
1717		goto out;
1718	}
1719
1720	read_unlock(&neigh_tbl_lock);
1721	err = -EAFNOSUPPORT;
1722out:
1723	return err;
1724}
1725
1726static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1727{
1728	struct nlattr *nest;
1729
1730	nest = nla_nest_start(skb, NDTA_PARMS);
1731	if (nest == NULL)
1732		return -ENOBUFS;
1733
1734	if (parms->dev)
1735		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1736
1737	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1738	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1739	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1740	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1741	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1742	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1743	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1744	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1745		      parms->base_reachable_time);
1746	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1747	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1748	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1749	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1750	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1751	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1752
1753	return nla_nest_end(skb, nest);
1754
1755nla_put_failure:
1756	nla_nest_cancel(skb, nest);
1757	return -EMSGSIZE;
1758}
1759
1760static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1761			      u32 pid, u32 seq, int type, int flags)
1762{
1763	struct nlmsghdr *nlh;
1764	struct ndtmsg *ndtmsg;
1765
1766	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1767	if (nlh == NULL)
1768		return -EMSGSIZE;
1769
1770	ndtmsg = nlmsg_data(nlh);
1771
1772	read_lock_bh(&tbl->lock);
1773	ndtmsg->ndtm_family = tbl->family;
1774	ndtmsg->ndtm_pad1   = 0;
1775	ndtmsg->ndtm_pad2   = 0;
1776
1777	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1778	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1779	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1780	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1781	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1782
1783	{
1784		unsigned long now = jiffies;
1785		unsigned int flush_delta = now - tbl->last_flush;
1786		unsigned int rand_delta = now - tbl->last_rand;
1787		struct neigh_hash_table *nht;
1788		struct ndt_config ndc = {
1789			.ndtc_key_len		= tbl->key_len,
1790			.ndtc_entry_size	= tbl->entry_size,
1791			.ndtc_entries		= atomic_read(&tbl->entries),
1792			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1793			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1794			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1795		};
1796
1797		rcu_read_lock_bh();
1798		nht = rcu_dereference_bh(tbl->nht);
1799		ndc.ndtc_hash_rnd = nht->hash_rnd;
1800		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1801		rcu_read_unlock_bh();
1802
1803		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
 
1804	}
1805
1806	{
1807		int cpu;
1808		struct ndt_stats ndst;
1809
1810		memset(&ndst, 0, sizeof(ndst));
1811
1812		for_each_possible_cpu(cpu) {
1813			struct neigh_statistics	*st;
1814
1815			st = per_cpu_ptr(tbl->stats, cpu);
1816			ndst.ndts_allocs		+= st->allocs;
1817			ndst.ndts_destroys		+= st->destroys;
1818			ndst.ndts_hash_grows		+= st->hash_grows;
1819			ndst.ndts_res_failed		+= st->res_failed;
1820			ndst.ndts_lookups		+= st->lookups;
1821			ndst.ndts_hits			+= st->hits;
1822			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1823			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1824			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1825			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
 
1826		}
1827
1828		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1829	}
1830
1831	BUG_ON(tbl->parms.dev);
1832	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1833		goto nla_put_failure;
1834
1835	read_unlock_bh(&tbl->lock);
1836	return nlmsg_end(skb, nlh);
 
1837
1838nla_put_failure:
1839	read_unlock_bh(&tbl->lock);
1840	nlmsg_cancel(skb, nlh);
1841	return -EMSGSIZE;
1842}
1843
1844static int neightbl_fill_param_info(struct sk_buff *skb,
1845				    struct neigh_table *tbl,
1846				    struct neigh_parms *parms,
1847				    u32 pid, u32 seq, int type,
1848				    unsigned int flags)
1849{
1850	struct ndtmsg *ndtmsg;
1851	struct nlmsghdr *nlh;
1852
1853	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1854	if (nlh == NULL)
1855		return -EMSGSIZE;
1856
1857	ndtmsg = nlmsg_data(nlh);
1858
1859	read_lock_bh(&tbl->lock);
1860	ndtmsg->ndtm_family = tbl->family;
1861	ndtmsg->ndtm_pad1   = 0;
1862	ndtmsg->ndtm_pad2   = 0;
1863
1864	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1865	    neightbl_fill_parms(skb, parms) < 0)
1866		goto errout;
1867
1868	read_unlock_bh(&tbl->lock);
1869	return nlmsg_end(skb, nlh);
 
1870errout:
1871	read_unlock_bh(&tbl->lock);
1872	nlmsg_cancel(skb, nlh);
1873	return -EMSGSIZE;
1874}
1875
1876static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1877	[NDTA_NAME]		= { .type = NLA_STRING },
1878	[NDTA_THRESH1]		= { .type = NLA_U32 },
1879	[NDTA_THRESH2]		= { .type = NLA_U32 },
1880	[NDTA_THRESH3]		= { .type = NLA_U32 },
1881	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1882	[NDTA_PARMS]		= { .type = NLA_NESTED },
1883};
1884
1885static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1886	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1887	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1888	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1889	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1890	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1891	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
 
1892	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1893	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1894	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1895	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1896	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1897	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1898	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1899};
1900
1901static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1902{
1903	struct net *net = sock_net(skb->sk);
1904	struct neigh_table *tbl;
1905	struct ndtmsg *ndtmsg;
1906	struct nlattr *tb[NDTA_MAX+1];
1907	int err;
 
1908
1909	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1910			  nl_neightbl_policy);
1911	if (err < 0)
1912		goto errout;
1913
1914	if (tb[NDTA_NAME] == NULL) {
1915		err = -EINVAL;
1916		goto errout;
1917	}
1918
1919	ndtmsg = nlmsg_data(nlh);
1920	read_lock(&neigh_tbl_lock);
1921	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1922		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1923			continue;
1924
1925		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1926			break;
 
1927	}
1928
1929	if (tbl == NULL) {
1930		err = -ENOENT;
1931		goto errout_locked;
1932	}
1933
1934	/*
1935	 * We acquire tbl->lock to be nice to the periodic timers and
1936	 * make sure they always see a consistent set of values.
1937	 */
1938	write_lock_bh(&tbl->lock);
1939
1940	if (tb[NDTA_PARMS]) {
1941		struct nlattr *tbp[NDTPA_MAX+1];
1942		struct neigh_parms *p;
1943		int i, ifindex = 0;
1944
1945		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1946				       nl_ntbl_parm_policy);
1947		if (err < 0)
1948			goto errout_tbl_lock;
1949
1950		if (tbp[NDTPA_IFINDEX])
1951			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1952
1953		p = lookup_neigh_parms(tbl, net, ifindex);
1954		if (p == NULL) {
1955			err = -ENOENT;
1956			goto errout_tbl_lock;
1957		}
1958
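    		/* Each recognized NDTPA_* attribute below simply overwrites the
    		 * matching neigh_parms field; the time/delay attributes arrive as
    		 * u64 milliseconds and are converted to jiffies by nla_get_msecs().
    		 */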
1959		for (i = 1; i <= NDTPA_MAX; i++) {
1960			if (tbp[i] == NULL)
1961				continue;
1962
1963			switch (i) {
1964			case NDTPA_QUEUE_LEN:
1965				p->queue_len = nla_get_u32(tbp[i]);
1966				break;
1967			case NDTPA_PROXY_QLEN:
1968				p->proxy_qlen = nla_get_u32(tbp[i]);
 
1969				break;
1970			case NDTPA_APP_PROBES:
1971				p->app_probes = nla_get_u32(tbp[i]);
 
1972				break;
1973			case NDTPA_UCAST_PROBES:
1974				p->ucast_probes = nla_get_u32(tbp[i]);
 
1975				break;
1976			case NDTPA_MCAST_PROBES:
1977				p->mcast_probes = nla_get_u32(tbp[i]);
1978				break;
1979			case NDTPA_BASE_REACHABLE_TIME:
1980				p->base_reachable_time = nla_get_msecs(tbp[i]);
1981				break;
1982			case NDTPA_GC_STALETIME:
1983				p->gc_staletime = nla_get_msecs(tbp[i]);
 
1984				break;
1985			case NDTPA_DELAY_PROBE_TIME:
1986				p->delay_probe_time = nla_get_msecs(tbp[i]);
1987				break;
1988			case NDTPA_RETRANS_TIME:
1989				p->retrans_time = nla_get_msecs(tbp[i]);
 
1990				break;
1991			case NDTPA_ANYCAST_DELAY:
1992				p->anycast_delay = nla_get_msecs(tbp[i]);
 
1993				break;
1994			case NDTPA_PROXY_DELAY:
1995				p->proxy_delay = nla_get_msecs(tbp[i]);
 
1996				break;
1997			case NDTPA_LOCKTIME:
1998				p->locktime = nla_get_msecs(tbp[i]);
 
1999				break;
2000			}
2001		}
2002	}
2003
2004	if (tb[NDTA_THRESH1])
2005		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2006
2007	if (tb[NDTA_THRESH2])
2008		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2009
2010	if (tb[NDTA_THRESH3])
2011		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2012
2013	if (tb[NDTA_GC_INTERVAL])
2014		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2015
2016	err = 0;
2017
2018errout_tbl_lock:
2019	write_unlock_bh(&tbl->lock);
2020errout_locked:
2021	read_unlock(&neigh_tbl_lock);
2022errout:
2023	return err;
2024}
2025
2026static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2027{
2028	struct net *net = sock_net(skb->sk);
2029	int family, tidx, nidx = 0;
2030	int tbl_skip = cb->args[0];
2031	int neigh_skip = cb->args[1];
2032	struct neigh_table *tbl;
2033
2034	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2035
2036	read_lock(&neigh_tbl_lock);
2037	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2038		struct neigh_parms *p;
2039
2040		if (tidx < tbl_skip || (family && tbl->family != family))
2041			continue;
2042
2043		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2044				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2045				       NLM_F_MULTI) <= 0)
2046			break;
2047
2048		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2049			if (!net_eq(neigh_parms_net(p), net))
2050				continue;
2051
2052			if (nidx < neigh_skip)
2053				goto next;
2054
2055			if (neightbl_fill_param_info(skb, tbl, p,
2056						     NETLINK_CB(cb->skb).pid,
2057						     cb->nlh->nlmsg_seq,
2058						     RTM_NEWNEIGHTBL,
2059						     NLM_F_MULTI) <= 0)
2060				goto out;
2061		next:
2062			nidx++;
2063		}
2064
2065		neigh_skip = 0;
2066	}
2067out:
2068	read_unlock(&neigh_tbl_lock);
2069	cb->args[0] = tidx;
2070	cb->args[1] = nidx;
2071
2072	return skb->len;
2073}
2074
2075static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2076			   u32 pid, u32 seq, int type, unsigned int flags)
2077{
2078	unsigned long now = jiffies;
2079	struct nda_cacheinfo ci;
2080	struct nlmsghdr *nlh;
2081	struct ndmsg *ndm;
2082
2083	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2084	if (nlh == NULL)
2085		return -EMSGSIZE;
2086
2087	ndm = nlmsg_data(nlh);
2088	ndm->ndm_family	 = neigh->ops->family;
2089	ndm->ndm_pad1    = 0;
2090	ndm->ndm_pad2    = 0;
2091	ndm->ndm_flags	 = neigh->flags;
2092	ndm->ndm_type	 = neigh->type;
2093	ndm->ndm_ifindex = neigh->dev->ifindex;
2094
2095	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
 
2096
2097	read_lock_bh(&neigh->lock);
2098	ndm->ndm_state	 = neigh->nud_state;
2099	if (neigh->nud_state & NUD_VALID) {
2100		char haddr[MAX_ADDR_LEN];
2101
2102		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2103		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2104			read_unlock_bh(&neigh->lock);
2105			goto nla_put_failure;
2106		}
2107	}
2108
2109	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2110	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2111	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2112	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2113	read_unlock_bh(&neigh->lock);
2114
2115	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2116	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
 
2117
2118	return nlmsg_end(skb, nlh);
 
2119
2120nla_put_failure:
2121	nlmsg_cancel(skb, nlh);
2122	return -EMSGSIZE;
2123}
2124
2125static void neigh_update_notify(struct neighbour *neigh)
2126{
2127	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2128	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2129}
2130
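    /* Dump one table's entries to netlink.  The walk runs under
     * rcu_read_lock_bh(); cb->args[1] remembers the hash bucket and
     * cb->args[2] the index within it, so an interrupted dump can resume
     * where it left off.
     */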
2131static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2132			    struct netlink_callback *cb)
2133{
2134	struct net *net = sock_net(skb->sk);
2135	struct neighbour *n;
2136	int rc, h, s_h = cb->args[1];
2137	int idx, s_idx = idx = cb->args[2];
2138	struct neigh_hash_table *nht;
2139
2140	rcu_read_lock_bh();
2141	nht = rcu_dereference_bh(tbl->nht);
2142
2143	for (h = 0; h < (1 << nht->hash_shift); h++) {
2144		if (h < s_h)
2145			continue;
2146		if (h > s_h)
2147			s_idx = 0;
2148		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2149		     n != NULL;
2150		     n = rcu_dereference_bh(n->next)) {
2151			if (!net_eq(dev_net(n->dev), net))
2152				continue;
2153			if (idx < s_idx)
 
2154				goto next;
2155			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2156					    cb->nlh->nlmsg_seq,
2157					    RTM_NEWNEIGH,
2158					    NLM_F_MULTI) <= 0) {
2159				rc = -1;
2160				goto out;
2161			}
2162next:
2163			idx++;
2164		}
2165	}
2166	rc = skb->len;
2167out:
2168	rcu_read_unlock_bh();
2169	cb->args[1] = h;
2170	cb->args[2] = idx;
2171	return rc;
2172}
2173
2174static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2175{
2176	struct neigh_table *tbl;
2177	int t, family, s_t;
2178
2179	read_lock(&neigh_tbl_lock);
2180	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2181	s_t = cb->args[0];
2182
2183	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2184		if (t < s_t || (family && tbl->family != family))
2185			continue;
2186		if (t > s_t)
2187			memset(&cb->args[1], 0, sizeof(cb->args) -
2188						sizeof(cb->args[0]));
2189		if (neigh_dump_table(tbl, skb, cb) < 0)
2190			break;
2191	}
2192	read_unlock(&neigh_tbl_lock);
2193
2194	cb->args[0] = t;
2195	return skb->len;
2196}
2197
2198void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2199{
2200	int chain;
2201	struct neigh_hash_table *nht;
2202
2203	rcu_read_lock_bh();
2204	nht = rcu_dereference_bh(tbl->nht);
2205
2206	read_lock(&tbl->lock); /* avoid resizes */
2207	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2208		struct neighbour *n;
2209
2210		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2211		     n != NULL;
2212		     n = rcu_dereference_bh(n->next))
2213			cb(n, cookie);
2214	}
2215	read_unlock(&tbl->lock);
2216	rcu_read_unlock_bh();
2217}
2218EXPORT_SYMBOL(neigh_for_each);
2219
2220/* The tbl->lock must be held as a writer and BH disabled. */
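    /* For each entry, cb() decides under the entry's write lock whether it
     * should go away; released entries are unlinked from their bucket and
     * handed to neigh_cleanup_and_release() once the lock is dropped.
     */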
2221void __neigh_for_each_release(struct neigh_table *tbl,
2222			      int (*cb)(struct neighbour *))
2223{
2224	int chain;
2225	struct neigh_hash_table *nht;
2226
2227	nht = rcu_dereference_protected(tbl->nht,
2228					lockdep_is_held(&tbl->lock));
2229	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2230		struct neighbour *n;
2231		struct neighbour __rcu **np;
2232
2233		np = &nht->hash_buckets[chain];
2234		while ((n = rcu_dereference_protected(*np,
2235					lockdep_is_held(&tbl->lock))) != NULL) {
2236			int release;
2237
2238			write_lock(&n->lock);
2239			release = cb(n);
2240			if (release) {
2241				rcu_assign_pointer(*np,
2242					rcu_dereference_protected(n->next,
2243						lockdep_is_held(&tbl->lock)));
2244				n->dead = 1;
2245			} else
2246				np = &n->next;
2247			write_unlock(&n->lock);
2248			if (release)
2249				neigh_cleanup_and_release(n);
2250		}
2251	}
2252}
2253EXPORT_SYMBOL(__neigh_for_each_release);
2254
2255#ifdef CONFIG_PROC_FS
2256
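    /* /proc iteration: walk the main neighbour hash table first and then,
     * unless NEIGH_SEQ_NEIGH_ONLY is set, the proxy (pneigh) buckets.
     * state->bucket and the NEIGH_SEQ_IS_PNEIGH flag record where to resume.
     */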
2257static struct neighbour *neigh_get_first(struct seq_file *seq)
2258{
2259	struct neigh_seq_state *state = seq->private;
2260	struct net *net = seq_file_net(seq);
2261	struct neigh_hash_table *nht = state->nht;
2262	struct neighbour *n = NULL;
2263	int bucket = state->bucket;
2264
2265	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2266	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2267		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2268
2269		while (n) {
2270			if (!net_eq(dev_net(n->dev), net))
2271				goto next;
2272			if (state->neigh_sub_iter) {
2273				loff_t fakep = 0;
2274				void *v;
2275
2276				v = state->neigh_sub_iter(state, n, &fakep);
2277				if (!v)
2278					goto next;
2279			}
2280			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2281				break;
2282			if (n->nud_state & ~NUD_NOARP)
2283				break;
2284next:
2285			n = rcu_dereference_bh(n->next);
2286		}
2287
2288		if (n)
2289			break;
2290	}
2291	state->bucket = bucket;
2292
2293	return n;
2294}
2295
2296static struct neighbour *neigh_get_next(struct seq_file *seq,
2297					struct neighbour *n,
2298					loff_t *pos)
2299{
2300	struct neigh_seq_state *state = seq->private;
2301	struct net *net = seq_file_net(seq);
2302	struct neigh_hash_table *nht = state->nht;
2303
2304	if (state->neigh_sub_iter) {
2305		void *v = state->neigh_sub_iter(state, n, pos);
2306		if (v)
2307			return n;
2308	}
2309	n = rcu_dereference_bh(n->next);
2310
2311	while (1) {
2312		while (n) {
2313			if (!net_eq(dev_net(n->dev), net))
2314				goto next;
2315			if (state->neigh_sub_iter) {
2316				void *v = state->neigh_sub_iter(state, n, pos);
2317				if (v)
2318					return n;
2319				goto next;
2320			}
2321			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2322				break;
2323
2324			if (n->nud_state & ~NUD_NOARP)
2325				break;
2326next:
2327			n = rcu_dereference_bh(n->next);
2328		}
2329
2330		if (n)
2331			break;
2332
2333		if (++state->bucket >= (1 << nht->hash_shift))
2334			break;
2335
2336		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2337	}
2338
2339	if (n && pos)
2340		--(*pos);
2341	return n;
2342}
2343
2344static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2345{
2346	struct neighbour *n = neigh_get_first(seq);
2347
2348	if (n) {
2349		--(*pos);
2350		while (*pos) {
2351			n = neigh_get_next(seq, n, pos);
2352			if (!n)
2353				break;
2354		}
2355	}
2356	return *pos ? NULL : n;
2357}
2358
2359static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2360{
2361	struct neigh_seq_state *state = seq->private;
2362	struct net *net = seq_file_net(seq);
2363	struct neigh_table *tbl = state->tbl;
2364	struct pneigh_entry *pn = NULL;
2365	int bucket = state->bucket;
2366
2367	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2368	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2369		pn = tbl->phash_buckets[bucket];
2370		while (pn && !net_eq(pneigh_net(pn), net))
2371			pn = pn->next;
2372		if (pn)
2373			break;
2374	}
2375	state->bucket = bucket;
2376
2377	return pn;
2378}
2379
2380static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2381					    struct pneigh_entry *pn,
2382					    loff_t *pos)
2383{
2384	struct neigh_seq_state *state = seq->private;
2385	struct net *net = seq_file_net(seq);
2386	struct neigh_table *tbl = state->tbl;
2387
2388	pn = pn->next;
2389	while (!pn) {
2390		if (++state->bucket > PNEIGH_HASHMASK)
2391			break;
2392		pn = tbl->phash_buckets[state->bucket];
2393		while (pn && !net_eq(pneigh_net(pn), net))
2394			pn = pn->next;
2395		if (pn)
2396			break;
2397	}
2398
2399	if (pn && pos)
2400		--(*pos);
2401
2402	return pn;
2403}
2404
2405static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2406{
2407	struct pneigh_entry *pn = pneigh_get_first(seq);
2408
2409	if (pn) {
2410		--(*pos);
2411		while (*pos) {
2412			pn = pneigh_get_next(seq, pn, pos);
2413			if (!pn)
2414				break;
2415		}
2416	}
2417	return *pos ? NULL : pn;
2418}
2419
2420static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2421{
2422	struct neigh_seq_state *state = seq->private;
2423	void *rc;
2424	loff_t idxpos = *pos;
2425
2426	rc = neigh_get_idx(seq, &idxpos);
2427	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2428		rc = pneigh_get_idx(seq, &idxpos);
2429
2430	return rc;
2431}
2432
2433void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2434	__acquires(rcu_bh)
2435{
2436	struct neigh_seq_state *state = seq->private;
2437
2438	state->tbl = tbl;
2439	state->bucket = 0;
2440	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2441
2442	rcu_read_lock_bh();
2443	state->nht = rcu_dereference_bh(tbl->nht);
2444
2445	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2446}
2447EXPORT_SYMBOL(neigh_seq_start);
2448
2449void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2450{
2451	struct neigh_seq_state *state;
2452	void *rc;
2453
2454	if (v == SEQ_START_TOKEN) {
2455		rc = neigh_get_first(seq);
2456		goto out;
2457	}
2458
2459	state = seq->private;
2460	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2461		rc = neigh_get_next(seq, v, NULL);
2462		if (rc)
2463			goto out;
2464		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2465			rc = pneigh_get_first(seq);
2466	} else {
2467		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2468		rc = pneigh_get_next(seq, v, NULL);
2469	}
2470out:
2471	++(*pos);
2472	return rc;
2473}
2474EXPORT_SYMBOL(neigh_seq_next);
2475
2476void neigh_seq_stop(struct seq_file *seq, void *v)
2477	__releases(rcu_bh)
2478{
2479	rcu_read_unlock_bh();
2480}
2481EXPORT_SYMBOL(neigh_seq_stop);
2482
2483/* statistics via seq_file */
2484
2485static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2486{
2487	struct neigh_table *tbl = seq->private;
2488	int cpu;
2489
2490	if (*pos == 0)
2491		return SEQ_START_TOKEN;
2492
2493	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2494		if (!cpu_possible(cpu))
2495			continue;
2496		*pos = cpu+1;
2497		return per_cpu_ptr(tbl->stats, cpu);
2498	}
2499	return NULL;
2500}
2501
2502static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2503{
2504	struct neigh_table *tbl = seq->private;
2505	int cpu;
2506
2507	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2508		if (!cpu_possible(cpu))
2509			continue;
2510		*pos = cpu+1;
2511		return per_cpu_ptr(tbl->stats, cpu);
2512	}
2513	return NULL;
2514}
2515
2516static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2517{
2518
2519}
2520
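    /* One output line per possible CPU: the first column is the current
     * number of entries in the table (shared by all CPUs), the remaining
     * columns are that CPU's counters from struct neigh_statistics.
     */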
2521static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2522{
2523	struct neigh_table *tbl = seq->private;
2524	struct neigh_statistics *st = v;
2525
2526	if (v == SEQ_START_TOKEN) {
2527		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2528		return 0;
2529	}
2530
2531	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2532			"%08lx %08lx  %08lx %08lx %08lx\n",
2533		   atomic_read(&tbl->entries),
2534
2535		   st->allocs,
2536		   st->destroys,
2537		   st->hash_grows,
2538
2539		   st->lookups,
2540		   st->hits,
2541
2542		   st->res_failed,
2543
2544		   st->rcv_probes_mcast,
2545		   st->rcv_probes_ucast,
2546
2547		   st->periodic_gc_runs,
2548		   st->forced_gc_runs,
2549		   st->unres_discards
 
2550		   );
2551
2552	return 0;
2553}
2554
2555static const struct seq_operations neigh_stat_seq_ops = {
2556	.start	= neigh_stat_seq_start,
2557	.next	= neigh_stat_seq_next,
2558	.stop	= neigh_stat_seq_stop,
2559	.show	= neigh_stat_seq_show,
2560};
2561
2562static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2563{
2564	int ret = seq_open(file, &neigh_stat_seq_ops);
2565
2566	if (!ret) {
2567		struct seq_file *sf = file->private_data;
2568		sf->private = PDE(inode)->data;
2569	}
2570	return ret;
2571};
2572
2573static const struct file_operations neigh_stat_seq_fops = {
2574	.owner	 = THIS_MODULE,
2575	.open 	 = neigh_stat_seq_open,
2576	.read	 = seq_read,
2577	.llseek	 = seq_lseek,
2578	.release = seq_release,
2579};
2580
2581#endif /* CONFIG_PROC_FS */
2582
2583static inline size_t neigh_nlmsg_size(void)
2584{
2585	return NLMSG_ALIGN(sizeof(struct ndmsg))
2586	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2587	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2588	       + nla_total_size(sizeof(struct nda_cacheinfo))
2589	       + nla_total_size(4); /* NDA_PROBES */
2590}
2591
2592static void __neigh_notify(struct neighbour *n, int type, int flags)
 
2593{
2594	struct net *net = dev_net(n->dev);
2595	struct sk_buff *skb;
2596	int err = -ENOBUFS;
2597
2598	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2599	if (skb == NULL)
2600		goto errout;
2601
2602	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2603	if (err < 0) {
2604		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2605		WARN_ON(err == -EMSGSIZE);
2606		kfree_skb(skb);
2607		goto errout;
2608	}
2609	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2610	return;
2611errout:
2612	if (err < 0)
2613		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2614}
2615
2616#ifdef CONFIG_ARPD
2617void neigh_app_ns(struct neighbour *n)
2618{
2619	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2620}
2621EXPORT_SYMBOL(neigh_app_ns);
2622#endif /* CONFIG_ARPD */
2623
2624#ifdef CONFIG_SYSCTL
2625
2626#define NEIGH_VARS_MAX 19
2627
2628static struct neigh_sysctl_table {
2629	struct ctl_table_header *sysctl_header;
2630	struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2631	char *dev_name;
2632} neigh_sysctl_template __read_mostly = {
2633	.neigh_vars = {
2634		{
2635			.procname	= "mcast_solicit",
2636			.maxlen		= sizeof(int),
2637			.mode		= 0644,
2638			.proc_handler	= proc_dointvec,
2639		},
2640		{
2641			.procname	= "ucast_solicit",
2642			.maxlen		= sizeof(int),
2643			.mode		= 0644,
2644			.proc_handler	= proc_dointvec,
2645		},
2646		{
2647			.procname	= "app_solicit",
2648			.maxlen		= sizeof(int),
2649			.mode		= 0644,
2650			.proc_handler	= proc_dointvec,
2651		},
2652		{
2653			.procname	= "retrans_time",
2654			.maxlen		= sizeof(int),
2655			.mode		= 0644,
2656			.proc_handler	= proc_dointvec_userhz_jiffies,
2657		},
2658		{
2659			.procname	= "base_reachable_time",
2660			.maxlen		= sizeof(int),
2661			.mode		= 0644,
2662			.proc_handler	= proc_dointvec_jiffies,
2663		},
2664		{
2665			.procname	= "delay_first_probe_time",
2666			.maxlen		= sizeof(int),
2667			.mode		= 0644,
2668			.proc_handler	= proc_dointvec_jiffies,
2669		},
2670		{
2671			.procname	= "gc_stale_time",
2672			.maxlen		= sizeof(int),
2673			.mode		= 0644,
2674			.proc_handler	= proc_dointvec_jiffies,
2675		},
2676		{
2677			.procname	= "unres_qlen",
2678			.maxlen		= sizeof(int),
2679			.mode		= 0644,
2680			.proc_handler	= proc_dointvec,
2681		},
2682		{
2683			.procname	= "proxy_qlen",
2684			.maxlen		= sizeof(int),
2685			.mode		= 0644,
2686			.proc_handler	= proc_dointvec,
2687		},
2688		{
2689			.procname	= "anycast_delay",
2690			.maxlen		= sizeof(int),
2691			.mode		= 0644,
2692			.proc_handler	= proc_dointvec_userhz_jiffies,
2693		},
2694		{
2695			.procname	= "proxy_delay",
2696			.maxlen		= sizeof(int),
2697			.mode		= 0644,
2698			.proc_handler	= proc_dointvec_userhz_jiffies,
2699		},
2700		{
2701			.procname	= "locktime",
2702			.maxlen		= sizeof(int),
2703			.mode		= 0644,
2704			.proc_handler	= proc_dointvec_userhz_jiffies,
2705		},
2706		{
2707			.procname	= "retrans_time_ms",
2708			.maxlen		= sizeof(int),
2709			.mode		= 0644,
2710			.proc_handler	= proc_dointvec_ms_jiffies,
2711		},
2712		{
2713			.procname	= "base_reachable_time_ms",
2714			.maxlen		= sizeof(int),
2715			.mode		= 0644,
2716			.proc_handler	= proc_dointvec_ms_jiffies,
2717		},
2718		{
2719			.procname	= "gc_interval",
2720			.maxlen		= sizeof(int),
2721			.mode		= 0644,
2722			.proc_handler	= proc_dointvec_jiffies,
2723		},
2724		{
2725			.procname	= "gc_thresh1",
2726			.maxlen		= sizeof(int),
2727			.mode		= 0644,
2728			.proc_handler	= proc_dointvec,
2729		},
2730		{
2731			.procname	= "gc_thresh2",
2732			.maxlen		= sizeof(int),
2733			.mode		= 0644,
2734			.proc_handler	= proc_dointvec,
2735		},
2736		{
2737			.procname	= "gc_thresh3",
2738			.maxlen		= sizeof(int),
2739			.mode		= 0644,
2740			.proc_handler	= proc_dointvec,
2741		},
2742		{},
2743	},
2744};
2745
2746int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2747			  char *p_name, proc_handler *handler)
2748{
 
2749	struct neigh_sysctl_table *t;
2750	const char *dev_name_source = NULL;
2751
2752#define NEIGH_CTL_PATH_ROOT	0
2753#define NEIGH_CTL_PATH_PROTO	1
2754#define NEIGH_CTL_PATH_NEIGH	2
2755#define NEIGH_CTL_PATH_DEV	3
2756
2757	struct ctl_path neigh_path[] = {
2758		{ .procname = "net",	 },
2759		{ .procname = "proto",	 },
2760		{ .procname = "neigh",	 },
2761		{ .procname = "default", },
2762		{ },
2763	};
2764
2765	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2766	if (!t)
2767		goto err;
2768
2769	t->neigh_vars[0].data  = &p->mcast_probes;
2770	t->neigh_vars[1].data  = &p->ucast_probes;
2771	t->neigh_vars[2].data  = &p->app_probes;
2772	t->neigh_vars[3].data  = &p->retrans_time;
2773	t->neigh_vars[4].data  = &p->base_reachable_time;
2774	t->neigh_vars[5].data  = &p->delay_probe_time;
2775	t->neigh_vars[6].data  = &p->gc_staletime;
2776	t->neigh_vars[7].data  = &p->queue_len;
2777	t->neigh_vars[8].data  = &p->proxy_qlen;
2778	t->neigh_vars[9].data  = &p->anycast_delay;
2779	t->neigh_vars[10].data = &p->proxy_delay;
2780	t->neigh_vars[11].data = &p->locktime;
2781	t->neigh_vars[12].data  = &p->retrans_time;
2782	t->neigh_vars[13].data  = &p->base_reachable_time;
2783
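    	/* Entries 0-13 mirror per-parms fields.  For the per-protocol
    	 * default (dev == NULL), entries 14-17 are pointed just past the
    	 * default neigh_parms, where struct neigh_table keeps the table-wide
    	 * gc_interval and gc_thresh1-3 values, hence the (int *)(p + 1)
    	 * arithmetic below.
    	 */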
2784	if (dev) {
2785		dev_name_source = dev->name;
2786		/* Terminate the table early */
2787		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
 
2788	} else {
2789		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2790		t->neigh_vars[14].data = (int *)(p + 1);
2791		t->neigh_vars[15].data = (int *)(p + 1) + 1;
2792		t->neigh_vars[16].data = (int *)(p + 1) + 2;
2793		t->neigh_vars[17].data = (int *)(p + 1) + 3;
 
2794	}
2795
2796
2797	if (handler) {
2798		/* RetransTime */
2799		t->neigh_vars[3].proc_handler = handler;
2800		t->neigh_vars[3].extra1 = dev;
2801		/* ReachableTime */
2802		t->neigh_vars[4].proc_handler = handler;
2803		t->neigh_vars[4].extra1 = dev;
2804		/* RetransTime (in milliseconds)*/
2805		t->neigh_vars[12].proc_handler = handler;
2806		t->neigh_vars[12].extra1 = dev;
2807		/* ReachableTime (in milliseconds) */
2808		t->neigh_vars[13].proc_handler = handler;
2809		t->neigh_vars[13].extra1 = dev;
2810	}
2811
2812	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2813	if (!t->dev_name)
2814		goto free;
2815
2816	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2817	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2818
2819	t->sysctl_header =
2820		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2821	if (!t->sysctl_header)
2822		goto free_procname;
2823
2824	p->sysctl_table = t;
2825	return 0;
2826
2827free_procname:
2828	kfree(t->dev_name);
2829free:
2830	kfree(t);
2831err:
2832	return -ENOBUFS;
2833}
2834EXPORT_SYMBOL(neigh_sysctl_register);
2835
2836void neigh_sysctl_unregister(struct neigh_parms *p)
2837{
2838	if (p->sysctl_table) {
2839		struct neigh_sysctl_table *t = p->sysctl_table;
2840		p->sysctl_table = NULL;
2841		unregister_sysctl_table(t->sysctl_header);
2842		kfree(t->dev_name);
2843		kfree(t);
2844	}
2845}
2846EXPORT_SYMBOL(neigh_sysctl_unregister);
2847
2848#endif	/* CONFIG_SYSCTL */
2849
2850static int __init neigh_init(void)
2851{
2852	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
2853	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
2854	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
2855
2856	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
2857		      NULL);
2858	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
2859
2860	return 0;
2861}
2862
2863subsys_initcall(neigh_init);
2864
v4.17
   1/*
   2 *	Generic address resolution entity
   3 *
   4 *	Authors:
   5 *	Pedro Roque		<roque@di.fc.ul.pt>
   6 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 *
  13 *	Fixes:
  14 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  15 *	Harald Welte		Add neighbour cache statistics like rtstat
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/slab.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/socket.h>
  25#include <linux/netdevice.h>
  26#include <linux/proc_fs.h>
  27#ifdef CONFIG_SYSCTL
  28#include <linux/sysctl.h>
  29#endif
  30#include <linux/times.h>
  31#include <net/net_namespace.h>
  32#include <net/neighbour.h>
  33#include <net/dst.h>
  34#include <net/sock.h>
  35#include <net/netevent.h>
  36#include <net/netlink.h>
  37#include <linux/rtnetlink.h>
  38#include <linux/random.h>
  39#include <linux/string.h>
  40#include <linux/log2.h>
  41#include <linux/inetdevice.h>
  42#include <net/addrconf.h>
  43
  44#define DEBUG
  45#define NEIGH_DEBUG 1
  46#define neigh_dbg(level, fmt, ...)		\
  47do {						\
  48	if (level <= NEIGH_DEBUG)		\
  49		pr_debug(fmt, ##__VA_ARGS__);	\
  50} while (0)
  51
  52#define PNEIGH_HASHMASK		0xF
  53
  54static void neigh_timer_handler(struct timer_list *t);
  55static void __neigh_notify(struct neighbour *n, int type, int flags,
  56			   u32 pid);
  57static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
  58static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
  59				    struct net_device *dev);
  60
 
  61#ifdef CONFIG_PROC_FS
  62static const struct file_operations neigh_stat_seq_fops;
  63#endif
  64
  65/*
  66   Neighbour hash table buckets are protected with rwlock tbl->lock.
  67
  68   - All the scans/updates to hash buckets MUST be made under this lock.
  69   - NOTHING clever should be made under this lock: no callbacks
  70     to protocol backends, no attempts to send something to network.
  71     It will result in deadlocks, if backend/driver wants to use neighbour
  72     cache.
  73   - If the entry requires some non-trivial actions, increase
  74     its reference count and release table lock.
  75
  76   Neighbour entries are protected:
  77   - with reference count.
  78   - with rwlock neigh->lock
  79
  80   Reference count prevents destruction.
  81
  82   neigh->lock mainly serializes ll address data and its validity state.
  83   However, the same lock is used to protect another entry fields:
  84    - timer
  85    - resolution queue
  86
  87   Again, nothing clever shall be made under neigh->lock,
  88   the most complicated procedure, which we allow is dev->hard_header.
  89   It is supposed, that dev->hard_header is simplistic and does
  90   not make callbacks to neighbour tables.
  91 */
  92
  93static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  94{
  95	kfree_skb(skb);
  96	return -ENETDOWN;
  97}
  98
  99static void neigh_cleanup_and_release(struct neighbour *neigh)
 100{
 101	if (neigh->parms->neigh_cleanup)
 102		neigh->parms->neigh_cleanup(neigh);
 103
 104	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
 105	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 106	neigh_release(neigh);
 107}
 108
 109/*
 110 * The result is uniformly distributed in the interval (1/2)*base...(3/2)*base.
 111 * This matches the default IPv6 settings and is not overridable,
 112 * because it is a really reasonable choice.
 113 */
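    /*
     * For example, with a base_reachable_time of 30 seconds the computed
     * reachable_time falls (roughly uniformly) between 15 and 45 seconds.
     */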
 114
 115unsigned long neigh_rand_reach_time(unsigned long base)
 116{
 117	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 118}
 119EXPORT_SYMBOL(neigh_rand_reach_time);
 120
 121
 122static bool neigh_del(struct neighbour *n, __u8 state,
 123		      struct neighbour __rcu **np, struct neigh_table *tbl)
 124{
 125	bool retval = false;
 126
 127	write_lock(&n->lock);
 128	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
 129		struct neighbour *neigh;
 130
 131		neigh = rcu_dereference_protected(n->next,
 132						  lockdep_is_held(&tbl->lock));
 133		rcu_assign_pointer(*np, neigh);
 134		n->dead = 1;
 135		retval = true;
 136	}
 137	write_unlock(&n->lock);
 138	if (retval)
 139		neigh_cleanup_and_release(n);
 140	return retval;
 141}
 142
 143bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 144{
 145	struct neigh_hash_table *nht;
 146	void *pkey = ndel->primary_key;
 147	u32 hash_val;
 148	struct neighbour *n;
 149	struct neighbour __rcu **np;
 150
 151	nht = rcu_dereference_protected(tbl->nht,
 152					lockdep_is_held(&tbl->lock));
 153	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
 154	hash_val = hash_val >> (32 - nht->hash_shift);
 155
 156	np = &nht->hash_buckets[hash_val];
 157	while ((n = rcu_dereference_protected(*np,
 158					      lockdep_is_held(&tbl->lock)))) {
 159		if (n == ndel)
 160			return neigh_del(n, 0, np, tbl);
 161		np = &n->next;
 162	}
 163	return false;
 164}
 165
 166static int neigh_forced_gc(struct neigh_table *tbl)
 167{
 168	int shrunk = 0;
 169	int i;
 170	struct neigh_hash_table *nht;
 171
 172	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 173
 174	write_lock_bh(&tbl->lock);
 175	nht = rcu_dereference_protected(tbl->nht,
 176					lockdep_is_held(&tbl->lock));
 177	for (i = 0; i < (1 << nht->hash_shift); i++) {
 178		struct neighbour *n;
 179		struct neighbour __rcu **np;
 180
 181		np = &nht->hash_buckets[i];
 182		while ((n = rcu_dereference_protected(*np,
 183					lockdep_is_held(&tbl->lock))) != NULL) {
 184			/* Neighbour record may be discarded if:
 185			 * - nobody refers to it.
 186			 * - it is not permanent
 187			 */
 188			if (neigh_del(n, NUD_PERMANENT, np, tbl)) {
 189				shrunk = 1;
 190				continue;
 191			}
 
 192			np = &n->next;
 193		}
 194	}
 195
 196	tbl->last_flush = jiffies;
 197
 198	write_unlock_bh(&tbl->lock);
 199
 200	return shrunk;
 201}
 202
 203static void neigh_add_timer(struct neighbour *n, unsigned long when)
 204{
 205	neigh_hold(n);
 206	if (unlikely(mod_timer(&n->timer, when))) {
 207		printk("NEIGH: BUG, double timer add, state is %x\n",
 208		       n->nud_state);
 209		dump_stack();
 210	}
 211}
 212
 213static int neigh_del_timer(struct neighbour *n)
 214{
 215	if ((n->nud_state & NUD_IN_TIMER) &&
 216	    del_timer(&n->timer)) {
 217		neigh_release(n);
 218		return 1;
 219	}
 220	return 0;
 221}
 222
 223static void pneigh_queue_purge(struct sk_buff_head *list)
 224{
 225	struct sk_buff *skb;
 226
 227	while ((skb = skb_dequeue(list)) != NULL) {
 228		dev_put(skb->dev);
 229		kfree_skb(skb);
 230	}
 231}
 232
 233static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 234{
 235	int i;
 236	struct neigh_hash_table *nht;
 237
 238	nht = rcu_dereference_protected(tbl->nht,
 239					lockdep_is_held(&tbl->lock));
 240
 241	for (i = 0; i < (1 << nht->hash_shift); i++) {
 242		struct neighbour *n;
 243		struct neighbour __rcu **np = &nht->hash_buckets[i];
 244
 245		while ((n = rcu_dereference_protected(*np,
 246					lockdep_is_held(&tbl->lock))) != NULL) {
 247			if (dev && n->dev != dev) {
 248				np = &n->next;
 249				continue;
 250			}
 251			rcu_assign_pointer(*np,
 252				   rcu_dereference_protected(n->next,
 253						lockdep_is_held(&tbl->lock)));
 254			write_lock(&n->lock);
 255			neigh_del_timer(n);
 256			n->dead = 1;
 257
 258			if (refcount_read(&n->refcnt) != 1) {
 259				/* The most unpleasant situation.
 260				   We must destroy the neighbour entry,
 261				   but someone still uses it.
 262
 263				   Destruction will be delayed until
 264				   the last user releases it, but
 265				   we must kill the timers etc. and move
 266				   it to a safe state.
 267				 */
 268				__skb_queue_purge(&n->arp_queue);
 269				n->arp_queue_len_bytes = 0;
 270				n->output = neigh_blackhole;
 271				if (n->nud_state & NUD_VALID)
 272					n->nud_state = NUD_NOARP;
 273				else
 274					n->nud_state = NUD_NONE;
 275				neigh_dbg(2, "neigh %p is stray\n", n);
 276			}
 277			write_unlock(&n->lock);
 278			neigh_cleanup_and_release(n);
 279		}
 280	}
 281}
 282
 283void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 284{
 285	write_lock_bh(&tbl->lock);
 286	neigh_flush_dev(tbl, dev);
 287	write_unlock_bh(&tbl->lock);
 288}
 289EXPORT_SYMBOL(neigh_changeaddr);
 290
 291int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 292{
 293	write_lock_bh(&tbl->lock);
 294	neigh_flush_dev(tbl, dev);
 295	pneigh_ifdown_and_unlock(tbl, dev);
 
 296
 297	del_timer_sync(&tbl->proxy_timer);
 298	pneigh_queue_purge(&tbl->proxy_queue);
 299	return 0;
 300}
 301EXPORT_SYMBOL(neigh_ifdown);
 302
 303static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 304{
 305	struct neighbour *n = NULL;
 306	unsigned long now = jiffies;
 307	int entries;
 308
 309	entries = atomic_inc_return(&tbl->entries) - 1;
 310	if (entries >= tbl->gc_thresh3 ||
 311	    (entries >= tbl->gc_thresh2 &&
 312	     time_after(now, tbl->last_flush + 5 * HZ))) {
 313		if (!neigh_forced_gc(tbl) &&
 314		    entries >= tbl->gc_thresh3) {
 315			net_info_ratelimited("%s: neighbor table overflow!\n",
 316					     tbl->id);
 317			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 318			goto out_entries;
 319		}
 320	}
 321
 322	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 323	if (!n)
 324		goto out_entries;
 325
 326	__skb_queue_head_init(&n->arp_queue);
 327	rwlock_init(&n->lock);
 328	seqlock_init(&n->ha_lock);
 329	n->updated	  = n->used = now;
 330	n->nud_state	  = NUD_NONE;
 331	n->output	  = neigh_blackhole;
 332	seqlock_init(&n->hh.hh_lock);
 333	n->parms	  = neigh_parms_clone(&tbl->parms);
 334	timer_setup(&n->timer, neigh_timer_handler, 0);
 335
 336	NEIGH_CACHE_STAT_INC(tbl, allocs);
 337	n->tbl		  = tbl;
 338	refcount_set(&n->refcnt, 1);
 339	n->dead		  = 1;
 340out:
 341	return n;
 342
 343out_entries:
 344	atomic_dec(&tbl->entries);
 345	goto out;
 346}
 347
 348static void neigh_get_hash_rnd(u32 *x)
 349{
 350	*x = get_random_u32() | 1;
 351}
 352
 353static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 354{
 355	size_t size = (1 << shift) * sizeof(struct neighbour *);
 356	struct neigh_hash_table *ret;
 357	struct neighbour __rcu **buckets;
 358	int i;
 359
 360	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 361	if (!ret)
 362		return NULL;
 363	if (size <= PAGE_SIZE)
 364		buckets = kzalloc(size, GFP_ATOMIC);
 365	else
 366		buckets = (struct neighbour __rcu **)
 367			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 368					   get_order(size));
 369	if (!buckets) {
 370		kfree(ret);
 371		return NULL;
 372	}
 373	ret->hash_buckets = buckets;
 374	ret->hash_shift = shift;
 375	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 376		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 377	return ret;
 378}
 379
 380static void neigh_hash_free_rcu(struct rcu_head *head)
 381{
 382	struct neigh_hash_table *nht = container_of(head,
 383						    struct neigh_hash_table,
 384						    rcu);
 385	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 386	struct neighbour __rcu **buckets = nht->hash_buckets;
 387
 388	if (size <= PAGE_SIZE)
 389		kfree(buckets);
 390	else
 391		free_pages((unsigned long)buckets, get_order(size));
 392	kfree(nht);
 393}
 394
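    /* Grow the hash table to 1 << new_shift buckets: rehash every entry into
     * the new table while tbl->lock is held for writing, publish it with
     * rcu_assign_pointer() and free the old table after an RCU grace period.
     */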
 395static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 396						unsigned long new_shift)
 397{
 398	unsigned int i, hash;
 399	struct neigh_hash_table *new_nht, *old_nht;
 400
 401	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 402
 403	old_nht = rcu_dereference_protected(tbl->nht,
 404					    lockdep_is_held(&tbl->lock));
 405	new_nht = neigh_hash_alloc(new_shift);
 406	if (!new_nht)
 407		return old_nht;
 408
 409	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 410		struct neighbour *n, *next;
 411
 412		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 413						   lockdep_is_held(&tbl->lock));
 414		     n != NULL;
 415		     n = next) {
 416			hash = tbl->hash(n->primary_key, n->dev,
 417					 new_nht->hash_rnd);
 418
 419			hash >>= (32 - new_nht->hash_shift);
 420			next = rcu_dereference_protected(n->next,
 421						lockdep_is_held(&tbl->lock));
 422
 423			rcu_assign_pointer(n->next,
 424					   rcu_dereference_protected(
 425						new_nht->hash_buckets[hash],
 426						lockdep_is_held(&tbl->lock)));
 427			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 428		}
 429	}
 430
 431	rcu_assign_pointer(tbl->nht, new_nht);
 432	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 433	return new_nht;
 434}
 435
 436struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 437			       struct net_device *dev)
 438{
 439	struct neighbour *n;
 440
 441	NEIGH_CACHE_STAT_INC(tbl, lookups);
 442
 443	rcu_read_lock_bh();
 444	n = __neigh_lookup_noref(tbl, pkey, dev);
 445	if (n) {
 446		if (!refcount_inc_not_zero(&n->refcnt))
 447			n = NULL;
 448		NEIGH_CACHE_STAT_INC(tbl, hits);
 449	}
 450
 451	rcu_read_unlock_bh();
 452	return n;
 453}
 454EXPORT_SYMBOL(neigh_lookup);
 455
 456struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 457				     const void *pkey)
 458{
 459	struct neighbour *n;
 460	unsigned int key_len = tbl->key_len;
 461	u32 hash_val;
 462	struct neigh_hash_table *nht;
 463
 464	NEIGH_CACHE_STAT_INC(tbl, lookups);
 465
 466	rcu_read_lock_bh();
 467	nht = rcu_dereference_bh(tbl->nht);
 468	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 469
 470	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 471	     n != NULL;
 472	     n = rcu_dereference_bh(n->next)) {
 473		if (!memcmp(n->primary_key, pkey, key_len) &&
 474		    net_eq(dev_net(n->dev), net)) {
 475			if (!refcount_inc_not_zero(&n->refcnt))
 476				n = NULL;
 477			NEIGH_CACHE_STAT_INC(tbl, hits);
 478			break;
 479		}
 480	}
 481
 482	rcu_read_unlock_bh();
 483	return n;
 484}
 485EXPORT_SYMBOL(neigh_lookup_nodev);
 486
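    /* Create a new cache entry.  If an identical entry was inserted
     * concurrently, the final bucket scan under tbl->lock finds it; the
     * existing entry is returned and the new allocation is released.
     */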
 487struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 488				 struct net_device *dev, bool want_ref)
 489{
 490	u32 hash_val;
 491	unsigned int key_len = tbl->key_len;
 492	int error;
 493	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
 494	struct neigh_hash_table *nht;
 495
 496	if (!n) {
 497		rc = ERR_PTR(-ENOBUFS);
 498		goto out;
 499	}
 500
 501	memcpy(n->primary_key, pkey, key_len);
 502	n->dev = dev;
 503	dev_hold(dev);
 504
 505	/* Protocol specific setup. */
 506	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 507		rc = ERR_PTR(error);
 508		goto out_neigh_release;
 509	}
 510
 511	if (dev->netdev_ops->ndo_neigh_construct) {
 512		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 513		if (error < 0) {
 514			rc = ERR_PTR(error);
 515			goto out_neigh_release;
 516		}
 517	}
 518
 519	/* Device specific setup. */
 520	if (n->parms->neigh_setup &&
 521	    (error = n->parms->neigh_setup(n)) < 0) {
 522		rc = ERR_PTR(error);
 523		goto out_neigh_release;
 524	}
 525
 526	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 527
 528	write_lock_bh(&tbl->lock);
 529	nht = rcu_dereference_protected(tbl->nht,
 530					lockdep_is_held(&tbl->lock));
 531
 532	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 533		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 534
 535	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 536
 537	if (n->parms->dead) {
 538		rc = ERR_PTR(-EINVAL);
 539		goto out_tbl_unlock;
 540	}
 541
 542	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 543					    lockdep_is_held(&tbl->lock));
 544	     n1 != NULL;
 545	     n1 = rcu_dereference_protected(n1->next,
 546			lockdep_is_held(&tbl->lock))) {
 547		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 548			if (want_ref)
 549				neigh_hold(n1);
 550			rc = n1;
 551			goto out_tbl_unlock;
 552		}
 553	}
 554
 555	n->dead = 0;
 556	if (want_ref)
 557		neigh_hold(n);
 558	rcu_assign_pointer(n->next,
 559			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 560						     lockdep_is_held(&tbl->lock)));
 561	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 562	write_unlock_bh(&tbl->lock);
 563	neigh_dbg(2, "neigh %p is created\n", n);
 564	rc = n;
 565out:
 566	return rc;
 567out_tbl_unlock:
 568	write_unlock_bh(&tbl->lock);
 569out_neigh_release:
 570	neigh_release(n);
 571	goto out;
 572}
 573EXPORT_SYMBOL(__neigh_create);
 574
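    /* Fold the last four bytes of the key down to a 4-bit bucket index, so
     * the proxy hash table always has PNEIGH_HASHMASK + 1 = 16 buckets.
     */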
 575static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 576{
 577	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 578	hash_val ^= (hash_val >> 16);
 579	hash_val ^= hash_val >> 8;
 580	hash_val ^= hash_val >> 4;
 581	hash_val &= PNEIGH_HASHMASK;
 582	return hash_val;
 583}
 584
 585static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 586					      struct net *net,
 587					      const void *pkey,
 588					      unsigned int key_len,
 589					      struct net_device *dev)
 590{
 591	while (n) {
 592		if (!memcmp(n->key, pkey, key_len) &&
 593		    net_eq(pneigh_net(n), net) &&
 594		    (n->dev == dev || !n->dev))
 595			return n;
 596		n = n->next;
 597	}
 598	return NULL;
 599}
 600
 601struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 602		struct net *net, const void *pkey, struct net_device *dev)
 603{
 604	unsigned int key_len = tbl->key_len;
 605	u32 hash_val = pneigh_hash(pkey, key_len);
 606
 607	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 608				 net, pkey, key_len, dev);
 609}
 610EXPORT_SYMBOL_GPL(__pneigh_lookup);
 611
 612struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 613				    struct net *net, const void *pkey,
 614				    struct net_device *dev, int creat)
 615{
 616	struct pneigh_entry *n;
 617	unsigned int key_len = tbl->key_len;
 618	u32 hash_val = pneigh_hash(pkey, key_len);
 619
 620	read_lock_bh(&tbl->lock);
 621	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 622			      net, pkey, key_len, dev);
 623	read_unlock_bh(&tbl->lock);
 624
 625	if (n || !creat)
 626		goto out;
 627
 628	ASSERT_RTNL();
 629
 630	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 631	if (!n)
 632		goto out;
 633
 634	write_pnet(&n->net, net);
 635	memcpy(n->key, pkey, key_len);
 636	n->dev = dev;
 637	if (dev)
 638		dev_hold(dev);
 639
 640	if (tbl->pconstructor && tbl->pconstructor(n)) {
 641		if (dev)
 642			dev_put(dev);
 
 643		kfree(n);
 644		n = NULL;
 645		goto out;
 646	}
 647
 648	write_lock_bh(&tbl->lock);
 649	n->next = tbl->phash_buckets[hash_val];
 650	tbl->phash_buckets[hash_val] = n;
 651	write_unlock_bh(&tbl->lock);
 652out:
 653	return n;
 654}
 655EXPORT_SYMBOL(pneigh_lookup);
 656
 657
 658int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 659		  struct net_device *dev)
 660{
 661	struct pneigh_entry *n, **np;
 662	unsigned int key_len = tbl->key_len;
 663	u32 hash_val = pneigh_hash(pkey, key_len);
 664
 665	write_lock_bh(&tbl->lock);
 666	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 667	     np = &n->next) {
 668		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 669		    net_eq(pneigh_net(n), net)) {
 670			*np = n->next;
 671			write_unlock_bh(&tbl->lock);
 672			if (tbl->pdestructor)
 673				tbl->pdestructor(n);
 674			if (n->dev)
 675				dev_put(n->dev);
 
 676			kfree(n);
 677			return 0;
 678		}
 679	}
 680	write_unlock_bh(&tbl->lock);
 681	return -ENOENT;
 682}
 683
 684static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 685				    struct net_device *dev)
 686{
 687	struct pneigh_entry *n, **np, *freelist = NULL;
 688	u32 h;
 689
 690	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 691		np = &tbl->phash_buckets[h];
 692		while ((n = *np) != NULL) {
 693			if (!dev || n->dev == dev) {
 694				*np = n->next;
 695				n->next = freelist;
 696				freelist = n;
 697				continue;
 698			}
 699			np = &n->next;
 700		}
 701	}
 702	write_unlock_bh(&tbl->lock);
 703	while ((n = freelist)) {
 704		freelist = n->next;
 705		n->next = NULL;
 706		if (tbl->pdestructor)
 707			tbl->pdestructor(n);
 708		if (n->dev)
 709			dev_put(n->dev);
 710		kfree(n);
 711	}
 712	return -ENOENT;
 713}
 714
 715static void neigh_parms_destroy(struct neigh_parms *parms);
 716
 717static inline void neigh_parms_put(struct neigh_parms *parms)
 718{
 719	if (refcount_dec_and_test(&parms->refcnt))
 720		neigh_parms_destroy(parms);
 721}
 722
 723/*
 724 *	neighbour must already be out of the table;
 725 *
 726 */
 727void neigh_destroy(struct neighbour *neigh)
 728{
 729	struct net_device *dev = neigh->dev;
 730
 731	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 732
 733	if (!neigh->dead) {
 734		pr_warn("Destroying alive neighbour %p\n", neigh);
 
 735		dump_stack();
 736		return;
 737	}
 738
 739	if (neigh_del_timer(neigh))
 740		pr_warn("Impossible event\n");
 741
 742	write_lock_bh(&neigh->lock);
 743	__skb_queue_purge(&neigh->arp_queue);
 744	write_unlock_bh(&neigh->lock);
 745	neigh->arp_queue_len_bytes = 0;
 746
 747	if (dev->netdev_ops->ndo_neigh_destroy)
 748		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 749
 750	dev_put(dev);
 751	neigh_parms_put(neigh->parms);
 752
 753	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 754
 755	atomic_dec(&neigh->tbl->entries);
 756	kfree_rcu(neigh, rcu);
 757}
 758EXPORT_SYMBOL(neigh_destroy);
 759
 760/* Neighbour state is suspicious;
 761   disable fast path.
 762
 763   Called with write_locked neigh.
 764 */
 765static void neigh_suspect(struct neighbour *neigh)
 766{
 767	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 768
 769	neigh->output = neigh->ops->output;
 770}
 771
 772/* Neighbour state is OK;
 773   enable fast path.
 774
 775   Called with write_locked neigh.
 776 */
 777static void neigh_connect(struct neighbour *neigh)
 778{
 779	neigh_dbg(2, "neigh %p is connected\n", neigh);
 780
 781	neigh->output = neigh->ops->connected_output;
 782}
 783
 784static void neigh_periodic_work(struct work_struct *work)
 785{
 786	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 787	struct neighbour *n;
 788	struct neighbour __rcu **np;
 789	unsigned int i;
 790	struct neigh_hash_table *nht;
 791
 792	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 793
 794	write_lock_bh(&tbl->lock);
 795	nht = rcu_dereference_protected(tbl->nht,
 796					lockdep_is_held(&tbl->lock));
 797
 798	/*
 799	 *	periodically recompute ReachableTime from random function
 800	 */
 801
 802	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 803		struct neigh_parms *p;
 804		tbl->last_rand = jiffies;
 805		list_for_each_entry(p, &tbl->parms_list, list)
 806			p->reachable_time =
 807				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 808	}
 809
 810	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 811		goto out;
 812
 813	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 814		np = &nht->hash_buckets[i];
 815
 816		while ((n = rcu_dereference_protected(*np,
 817				lockdep_is_held(&tbl->lock))) != NULL) {
 818			unsigned int state;
 819
 820			write_lock(&n->lock);
 821
 822			state = n->nud_state;
 823			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
 824				write_unlock(&n->lock);
 825				goto next_elt;
 826			}
 827
 828			if (time_before(n->used, n->confirmed))
 829				n->used = n->confirmed;
 830
 831			if (refcount_read(&n->refcnt) == 1 &&
 832			    (state == NUD_FAILED ||
 833			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 834				*np = n->next;
 835				n->dead = 1;
 836				write_unlock(&n->lock);
 837				neigh_cleanup_and_release(n);
 838				continue;
 839			}
 840			write_unlock(&n->lock);
 841
 842next_elt:
 843			np = &n->next;
 844		}
 845		/*
 846		 * It's fine to release lock here, even if hash table
 847		 * grows while we are preempted.
 848		 */
 849		write_unlock_bh(&tbl->lock);
 850		cond_resched();
 851		write_lock_bh(&tbl->lock);
 852		nht = rcu_dereference_protected(tbl->nht,
 853						lockdep_is_held(&tbl->lock));
 854	}
 855out:
 856	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 857	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 858	 * BASE_REACHABLE_TIME.
 859	 */
 860	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 861			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 862	write_unlock_bh(&tbl->lock);
 863}
 864
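    /* Number of probes after which an unresolved entry is failed: unicast +
     * application probes, plus either MCAST_REPROBES (when re-verifying an
     * entry already in NUD_PROBE) or MCAST_PROBES.
     */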
 865static __inline__ int neigh_max_probes(struct neighbour *n)
 866{
 867	struct neigh_parms *p = n->parms;
 868	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 869	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 870	        NEIGH_VAR(p, MCAST_PROBES));
 871}
 872
 873static void neigh_invalidate(struct neighbour *neigh)
 874	__releases(neigh->lock)
 875	__acquires(neigh->lock)
 876{
 877	struct sk_buff *skb;
 878
 879	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 880	neigh_dbg(2, "neigh %p is failed\n", neigh);
 881	neigh->updated = jiffies;
 882
 883	/* This is a very delicate place. report_unreachable() is a very
 884	   complicated routine. In particular, it can hit the same neighbour entry!
 885
 886	   So we try to be careful and avoid an endless loop. --ANK
 887	 */
 888	while (neigh->nud_state == NUD_FAILED &&
 889	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 890		write_unlock(&neigh->lock);
 891		neigh->ops->error_report(neigh, skb);
 892		write_lock(&neigh->lock);
 893	}
 894	__skb_queue_purge(&neigh->arp_queue);
 895	neigh->arp_queue_len_bytes = 0;
 896}
 897
 898static void neigh_probe(struct neighbour *neigh)
 899	__releases(neigh->lock)
 900{
 901	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 902	/* keep skb alive even if arp_queue overflows */
 903	if (skb)
 904		skb = skb_clone(skb, GFP_ATOMIC);
 905	write_unlock(&neigh->lock);
 906	if (neigh->ops->solicit)
 907		neigh->ops->solicit(neigh, skb);
 908	atomic_inc(&neigh->probes);
 909	kfree_skb(skb);
 910}
 911
 912/* Called when a timer expires for a neighbour entry. */
 913
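    /* State transitions driven from here:
     *   REACHABLE -> DELAY or STALE once the confirmation window expires,
     *   DELAY     -> REACHABLE (confirmed in time) or PROBE,
     *   PROBE/INCOMPLETE -> FAILED after neigh_max_probes() unanswered probes.
     */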
 914static void neigh_timer_handler(struct timer_list *t)
 915{
 916	unsigned long now, next;
 917	struct neighbour *neigh = from_timer(neigh, t, timer);
 918	unsigned int state;
 919	int notify = 0;
 920
 921	write_lock(&neigh->lock);
 922
 923	state = neigh->nud_state;
 924	now = jiffies;
 925	next = now + HZ;
 926
 927	if (!(state & NUD_IN_TIMER))
 928		goto out;
 
 929
 930	if (state & NUD_REACHABLE) {
 931		if (time_before_eq(now,
 932				   neigh->confirmed + neigh->parms->reachable_time)) {
 933			neigh_dbg(2, "neigh %p is still alive\n", neigh);
 934			next = neigh->confirmed + neigh->parms->reachable_time;
 935		} else if (time_before_eq(now,
 936					  neigh->used +
 937					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 938			neigh_dbg(2, "neigh %p is delayed\n", neigh);
 939			neigh->nud_state = NUD_DELAY;
 940			neigh->updated = jiffies;
 941			neigh_suspect(neigh);
 942			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
 943		} else {
 944			neigh_dbg(2, "neigh %p is suspected\n", neigh);
 945			neigh->nud_state = NUD_STALE;
 946			neigh->updated = jiffies;
 947			neigh_suspect(neigh);
 948			notify = 1;
 949		}
 950	} else if (state & NUD_DELAY) {
 951		if (time_before_eq(now,
 952				   neigh->confirmed +
 953				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
 954			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
 955			neigh->nud_state = NUD_REACHABLE;
 956			neigh->updated = jiffies;
 957			neigh_connect(neigh);
 958			notify = 1;
 959			next = neigh->confirmed + neigh->parms->reachable_time;
 960		} else {
 961			neigh_dbg(2, "neigh %p is probed\n", neigh);
 962			neigh->nud_state = NUD_PROBE;
 963			neigh->updated = jiffies;
 964			atomic_set(&neigh->probes, 0);
 965			notify = 1;
 966			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 967		}
 968	} else {
 969		/* NUD_PROBE|NUD_INCOMPLETE */
 970		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
 971	}
 972
 973	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 974	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 975		neigh->nud_state = NUD_FAILED;
 976		notify = 1;
 977		neigh_invalidate(neigh);
 978		goto out;
 979	}
 980
 981	if (neigh->nud_state & NUD_IN_TIMER) {
 982		if (time_before(next, jiffies + HZ/2))
 983			next = jiffies + HZ/2;
 984		if (!mod_timer(&neigh->timer, next))
 985			neigh_hold(neigh);
 986	}
 987	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 988		neigh_probe(neigh);
 989	} else {
 990out:
 991		write_unlock(&neigh->lock);
 992	}
 993
 994	if (notify)
 995		neigh_update_notify(neigh, 0);
 996
 997	neigh_release(neigh);
 998}
 999
1000int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1001{
1002	int rc;
1003	bool immediate_probe = false;
1004
1005	write_lock_bh(&neigh->lock);
1006
1007	rc = 0;
1008	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1009		goto out_unlock_bh;
1010	if (neigh->dead)
1011		goto out_dead;
1012
1013	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1014		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1015		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1016			unsigned long next, now = jiffies;
1017
1018			atomic_set(&neigh->probes,
1019				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1020			neigh->nud_state     = NUD_INCOMPLETE;
1021			neigh->updated = now;
1022			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1023					 HZ/2);
1024			neigh_add_timer(neigh, next);
1025			immediate_probe = true;
1026		} else {
1027			neigh->nud_state = NUD_FAILED;
1028			neigh->updated = jiffies;
1029			write_unlock_bh(&neigh->lock);
1030
1031			kfree_skb(skb);
1032			return 1;
1033		}
1034	} else if (neigh->nud_state & NUD_STALE) {
1035		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1036		neigh->nud_state = NUD_DELAY;
1037		neigh->updated = jiffies;
1038		neigh_add_timer(neigh, jiffies +
1039				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1040	}
1041
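    	/* While resolution is pending, queue the skb; if the per-neighbour
    	 * byte budget (QUEUE_LEN_BYTES) would be exceeded, drop the oldest
    	 * queued packets first and count them as unres_discards.
    	 */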
1042	if (neigh->nud_state == NUD_INCOMPLETE) {
1043		if (skb) {
1044			while (neigh->arp_queue_len_bytes + skb->truesize >
1045			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1046				struct sk_buff *buff;
1047
1048				buff = __skb_dequeue(&neigh->arp_queue);
1049				if (!buff)
1050					break;
1051				neigh->arp_queue_len_bytes -= buff->truesize;
1052				kfree_skb(buff);
1053				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1054			}
1055			skb_dst_force(skb);
1056			__skb_queue_tail(&neigh->arp_queue, skb);
1057			neigh->arp_queue_len_bytes += skb->truesize;
1058		}
1059		rc = 1;
1060	}
1061out_unlock_bh:
1062	if (immediate_probe)
1063		neigh_probe(neigh);
1064	else
1065		write_unlock(&neigh->lock);
1066	local_bh_enable();
1067	return rc;
1068
1069out_dead:
1070	if (neigh->nud_state & NUD_STALE)
1071		goto out_unlock_bh;
1072	write_unlock_bh(&neigh->lock);
1073	kfree_skb(skb);
1074	return 1;
1075}
1076EXPORT_SYMBOL(__neigh_event_send);
1077
1078static void neigh_update_hhs(struct neighbour *neigh)
1079{
1080	struct hh_cache *hh;
1081	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1082		= NULL;
1083
1084	if (neigh->dev->header_ops)
1085		update = neigh->dev->header_ops->cache_update;
1086
1087	if (update) {
1088		hh = &neigh->hh;
1089		if (hh->hh_len) {
1090			write_seqlock_bh(&hh->hh_lock);
1091			update(hh, neigh->dev, neigh->ha);
1092			write_sequnlock_bh(&hh->hh_lock);
1093		}
1094	}
1095}
1096
1097
1098
 1099	/* Generic update routine.
 1100	   -- lladdr is the new link-layer address, or NULL if none is supplied.
 1101	   -- new    is the new NUD state.
 1102	   -- flags
 1103		NEIGH_UPDATE_F_OVERRIDE allows an existing lladdr to be
 1104				overridden if it differs.
 1105		NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
 1106				lladdr as suspect instead of overriding it
 1107				if it differs.
 1108		NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1109
 1110		NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows an existing
 1111				NTF_ROUTER flag to be overridden.
 1112		NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known
 1113				to be a router.
 1114
 1115	   Caller MUST hold a reference count on the entry.
 1116	 */
1117
1118int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1119		 u32 flags, u32 nlmsg_pid)
1120{
1121	u8 old;
1122	int err;
1123	int notify = 0;
1124	struct net_device *dev;
1125	int update_isrouter = 0;
1126
1127	write_lock_bh(&neigh->lock);
1128
1129	dev    = neigh->dev;
1130	old    = neigh->nud_state;
1131	err    = -EPERM;
1132
1133	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1134	    (old & (NUD_NOARP | NUD_PERMANENT)))
1135		goto out;
1136	if (neigh->dead)
1137		goto out;
1138
1139	if (!(new & NUD_VALID)) {
1140		neigh_del_timer(neigh);
1141		if (old & NUD_CONNECTED)
1142			neigh_suspect(neigh);
1143		neigh->nud_state = new;
1144		err = 0;
1145		notify = old & NUD_VALID;
1146		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1147		    (new & NUD_FAILED)) {
1148			neigh_invalidate(neigh);
1149			notify = 1;
1150		}
1151		goto out;
1152	}
1153
1154	/* Compare new lladdr with cached one */
1155	if (!dev->addr_len) {
1156		/* First case: device needs no address. */
1157		lladdr = neigh->ha;
1158	} else if (lladdr) {
1159		/* The second case: if something is already cached
1160		   and a new address is proposed:
1161		   - compare new & old
1162		   - if they are different, check override flag
1163		 */
1164		if ((old & NUD_VALID) &&
1165		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1166			lladdr = neigh->ha;
1167	} else {
1168		/* No address is supplied; if we know something,
1169		   use it, otherwise discard the request.
1170		 */
1171		err = -EINVAL;
1172		if (!(old & NUD_VALID))
1173			goto out;
1174		lladdr = neigh->ha;
1175	}
1176
 1177	/* If the entry was valid and the address is unchanged,
 1178	   keep the current state when the proposed new state is STALE.
 1179	 */
1180	err = 0;
1181	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1182	if (old & NUD_VALID) {
1183		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1184			update_isrouter = 0;
1185			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1186			    (old & NUD_CONNECTED)) {
1187				lladdr = neigh->ha;
1188				new = NUD_STALE;
1189			} else
1190				goto out;
1191		} else {
1192			if (lladdr == neigh->ha && new == NUD_STALE &&
1193			    !(flags & NEIGH_UPDATE_F_ADMIN))
1194				new = old;
1195		}
1196	}
1197
 1198	/* Update timestamps only once we know we will make a change to the
 1199	 * neighbour entry. Otherwise we risk moving the locktime window with
 1200	 * no-op updates and ignoring relevant ARP updates.
 1201	 */
1202	if (new != old || lladdr != neigh->ha) {
1203		if (new & NUD_CONNECTED)
1204			neigh->confirmed = jiffies;
1205		neigh->updated = jiffies;
1206	}
1207
1208	if (new != old) {
1209		neigh_del_timer(neigh);
1210		if (new & NUD_PROBE)
1211			atomic_set(&neigh->probes, 0);
1212		if (new & NUD_IN_TIMER)
1213			neigh_add_timer(neigh, (jiffies +
1214						((new & NUD_REACHABLE) ?
1215						 neigh->parms->reachable_time :
1216						 0)));
1217		neigh->nud_state = new;
1218		notify = 1;
1219	}
1220
1221	if (lladdr != neigh->ha) {
1222		write_seqlock(&neigh->ha_lock);
1223		memcpy(&neigh->ha, lladdr, dev->addr_len);
1224		write_sequnlock(&neigh->ha_lock);
1225		neigh_update_hhs(neigh);
1226		if (!(new & NUD_CONNECTED))
1227			neigh->confirmed = jiffies -
1228				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1229		notify = 1;
1230	}
1231	if (new == old)
1232		goto out;
1233	if (new & NUD_CONNECTED)
1234		neigh_connect(neigh);
1235	else
1236		neigh_suspect(neigh);
1237	if (!(old & NUD_VALID)) {
1238		struct sk_buff *skb;
1239
1240		/* Again: avoid dead loop if something went wrong */
1241
1242		while (neigh->nud_state & NUD_VALID &&
1243		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1244			struct dst_entry *dst = skb_dst(skb);
1245			struct neighbour *n2, *n1 = neigh;
1246			write_unlock_bh(&neigh->lock);
1247
1248			rcu_read_lock();
1249
1250			/* Why not just use 'neigh' as-is?  The problem is that
1251			 * things such as shaper, eql, and sch_teql can end up
1252			 * using alternative, different, neigh objects to output
1253			 * the packet in the output path.  So what we need to do
1254			 * here is re-lookup the top-level neigh in the path so
1255			 * we can reinject the packet there.
1256			 */
1257			n2 = NULL;
1258			if (dst) {
1259				n2 = dst_neigh_lookup_skb(dst, skb);
1260				if (n2)
1261					n1 = n2;
1262			}
1263			n1->output(n1, skb);
1264			if (n2)
1265				neigh_release(n2);
1266			rcu_read_unlock();
1267
1268			write_lock_bh(&neigh->lock);
1269		}
1270		__skb_queue_purge(&neigh->arp_queue);
1271		neigh->arp_queue_len_bytes = 0;
1272	}
1273out:
1274	if (update_isrouter) {
1275		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1276			(neigh->flags | NTF_ROUTER) :
1277			(neigh->flags & ~NTF_ROUTER);
1278	}
1279	write_unlock_bh(&neigh->lock);
1280
1281	if (notify)
1282		neigh_update_notify(neigh, nlmsg_pid);
1283
1284	return err;
1285}
1286EXPORT_SYMBOL(neigh_update);
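/* A minimal usage sketch (illustrative only, not part of this file): a
 * protocol receive path that has just validated a reply carrying the
 * sender's link-layer address could refresh its cache entry roughly like
 * this, with 'tbl', 'saddr', 'lladdr' and 'dev' taken from the packet:
 *
 *	struct neighbour *n = neigh_lookup(tbl, &saddr, dev);
 *
 *	if (n) {
 *		neigh_update(n, lladdr, NUD_REACHABLE,
 *			     NEIGH_UPDATE_F_OVERRIDE, 0);
 *		neigh_release(n);
 *	}
 *
 * neigh_event_ns() below is a convenience wrapper for the common
 * "learn from a received solicitation" case, updating to NUD_STALE.
 */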
1287
1288/* Update the neigh to listen temporarily for probe responses, even if it is
1289 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1290 */
1291void __neigh_set_probe_once(struct neighbour *neigh)
1292{
1293	if (neigh->dead)
1294		return;
1295	neigh->updated = jiffies;
1296	if (!(neigh->nud_state & NUD_FAILED))
1297		return;
1298	neigh->nud_state = NUD_INCOMPLETE;
1299	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1300	neigh_add_timer(neigh,
1301			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1302}
1303EXPORT_SYMBOL(__neigh_set_probe_once);
1304
1305struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1306				 u8 *lladdr, void *saddr,
1307				 struct net_device *dev)
1308{
1309	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1310						 lladdr || !dev->addr_len);
1311	if (neigh)
1312		neigh_update(neigh, lladdr, NUD_STALE,
1313			     NEIGH_UPDATE_F_OVERRIDE, 0);
1314	return neigh;
1315}
1316EXPORT_SYMBOL(neigh_event_ns);
1317
1318/* called with read_lock_bh(&n->lock); */
1319static void neigh_hh_init(struct neighbour *n)
1320{
1321	struct net_device *dev = n->dev;
1322	__be16 prot = n->tbl->protocol;
1323	struct hh_cache	*hh = &n->hh;
1324
1325	write_lock_bh(&n->lock);
1326
1327	/* Only one thread can come in here and initialize the
1328	 * hh_cache entry.
1329	 */
1330	if (!hh->hh_len)
1331		dev->header_ops->cache(n, hh, prot);
1332
1333	write_unlock_bh(&n->lock);
1334}
1335
1336/* Slow and careful. */
1337
1338int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1339{
1340	int rc = 0;
1341
1342	if (!neigh_event_send(neigh, skb)) {
1343		int err;
1344		struct net_device *dev = neigh->dev;
1345		unsigned int seq;
1346
1347		if (dev->header_ops->cache && !neigh->hh.hh_len)
1348			neigh_hh_init(neigh);
1349
1350		do {
1351			__skb_pull(skb, skb_network_offset(skb));
1352			seq = read_seqbegin(&neigh->ha_lock);
1353			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1354					      neigh->ha, NULL, skb->len);
1355		} while (read_seqretry(&neigh->ha_lock, seq));
1356
1357		if (err >= 0)
1358			rc = dev_queue_xmit(skb);
1359		else
1360			goto out_kfree_skb;
1361	}
1362out:
1363	return rc;
1364out_kfree_skb:
1365	rc = -EINVAL;
1366	kfree_skb(skb);
1367	goto out;
1368}
1369EXPORT_SYMBOL(neigh_resolve_output);
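/* The loop above re-reads neigh->ha under the ha_lock seqlock: if
 * neigh_update() rewrites the hardware address concurrently, read_seqretry()
 * detects it and the link-layer header is rebuilt with the fresh address
 * before the skb is handed to dev_queue_xmit().  neigh_connected_output()
 * below uses the same retry pattern without the resolution step.
 */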
1370
1371/* As fast as possible without hh cache */
1372
1373int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1374{
1375	struct net_device *dev = neigh->dev;
1376	unsigned int seq;
1377	int err;
1378
1379	do {
1380		__skb_pull(skb, skb_network_offset(skb));
1381		seq = read_seqbegin(&neigh->ha_lock);
1382		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1383				      neigh->ha, NULL, skb->len);
1384	} while (read_seqretry(&neigh->ha_lock, seq));
1385
1386	if (err >= 0)
1387		err = dev_queue_xmit(skb);
1388	else {
1389		err = -EINVAL;
1390		kfree_skb(skb);
1391	}
1392	return err;
1393}
1394EXPORT_SYMBOL(neigh_connected_output);
1395
1396int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1397{
1398	return dev_queue_xmit(skb);
1399}
1400EXPORT_SYMBOL(neigh_direct_output);
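/* Together these form the fast/slow path split for transmitting through a
 * neighbour: neigh_resolve_output() triggers resolution first,
 * neigh_connected_output() assumes a valid address and only rebuilds the
 * header, and neigh_direct_output() is for devices that need no resolution
 * at all.  Which method a given entry uses is selected by neigh_suspect()
 * and neigh_connect() (called from neigh_update() above) through the
 * per-protocol neigh_ops, so the mapping sketched here is the usual
 * arrangement rather than something enforced in this file.
 */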
1401
1402static void neigh_proxy_process(struct timer_list *t)
1403{
1404	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1405	long sched_next = 0;
1406	unsigned long now = jiffies;
1407	struct sk_buff *skb, *n;
1408
1409	spin_lock(&tbl->proxy_queue.lock);
1410
1411	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1412		long tdif = NEIGH_CB(skb)->sched_next - now;
1413
1414		if (tdif <= 0) {
1415			struct net_device *dev = skb->dev;
1416
1417			__skb_unlink(skb, &tbl->proxy_queue);
1418			if (tbl->proxy_redo && netif_running(dev)) {
1419				rcu_read_lock();
1420				tbl->proxy_redo(skb);
1421				rcu_read_unlock();
1422			} else {
1423				kfree_skb(skb);
1424			}
1425
1426			dev_put(dev);
1427		} else if (!sched_next || tdif < sched_next)
1428			sched_next = tdif;
1429	}
1430	del_timer(&tbl->proxy_timer);
1431	if (sched_next)
1432		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1433	spin_unlock(&tbl->proxy_queue.lock);
1434}
1435
1436void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1437		    struct sk_buff *skb)
1438{
1439	unsigned long now = jiffies;
1440
1441	unsigned long sched_next = now + (prandom_u32() %
1442					  NEIGH_VAR(p, PROXY_DELAY));
1443
1444	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1445		kfree_skb(skb);
1446		return;
1447	}
1448
1449	NEIGH_CB(skb)->sched_next = sched_next;
1450	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1451
1452	spin_lock(&tbl->proxy_queue.lock);
1453	if (del_timer(&tbl->proxy_timer)) {
1454		if (time_before(tbl->proxy_timer.expires, sched_next))
1455			sched_next = tbl->proxy_timer.expires;
1456	}
1457	skb_dst_drop(skb);
1458	dev_hold(skb->dev);
1459	__skb_queue_tail(&tbl->proxy_queue, skb);
1460	mod_timer(&tbl->proxy_timer, sched_next);
1461	spin_unlock(&tbl->proxy_queue.lock);
1462}
1463EXPORT_SYMBOL(pneigh_enqueue);
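/* Proxy replies are deliberately jittered: each queued skb is scheduled at
 * now + (random value modulo PROXY_DELAY) and replayed from
 * neigh_proxy_process() through tbl->proxy_redo() once its slot expires,
 * provided the device is still running; anything beyond PROXY_QLEN queued
 * packets is dropped up front.
 */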
1464
1465static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1466						      struct net *net, int ifindex)
1467{
1468	struct neigh_parms *p;
1469
1470	list_for_each_entry(p, &tbl->parms_list, list) {
1471		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1472		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1473			return p;
1474	}
1475
1476	return NULL;
1477}
1478
1479struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1480				      struct neigh_table *tbl)
1481{
1482	struct neigh_parms *p;
1483	struct net *net = dev_net(dev);
1484	const struct net_device_ops *ops = dev->netdev_ops;
1485
1486	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1487	if (p) {
1488		p->tbl		  = tbl;
1489		refcount_set(&p->refcnt, 1);
1490		p->reachable_time =
1491				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1492		dev_hold(dev);
1493		p->dev = dev;
1494		write_pnet(&p->net, net);
1495		p->sysctl_table = NULL;
1496
1497		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1498			dev_put(dev);
1499			kfree(p);
1500			return NULL;
1501		}
1502
1503		write_lock_bh(&tbl->lock);
1504		list_add(&p->list, &tbl->parms.list);
1505		write_unlock_bh(&tbl->lock);
1506
1507		neigh_parms_data_state_cleanall(p);
1508	}
1509	return p;
1510}
1511EXPORT_SYMBOL(neigh_parms_alloc);
1512
1513static void neigh_rcu_free_parms(struct rcu_head *head)
1514{
1515	struct neigh_parms *parms =
1516		container_of(head, struct neigh_parms, rcu_head);
1517
1518	neigh_parms_put(parms);
1519}
1520
1521void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1522{
1523	if (!parms || parms == &tbl->parms)
1524		return;
1525	write_lock_bh(&tbl->lock);
1526	list_del(&parms->list);
1527	parms->dead = 1;
1528	write_unlock_bh(&tbl->lock);
1529	if (parms->dev)
1530		dev_put(parms->dev);
1531	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1532}
1533EXPORT_SYMBOL(neigh_parms_release);
1534
1535static void neigh_parms_destroy(struct neigh_parms *parms)
1536{
1537	kfree(parms);
1538}
1539
1540static struct lock_class_key neigh_table_proxy_queue_class;
1541
1542static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1543
1544void neigh_table_init(int index, struct neigh_table *tbl)
1545{
1546	unsigned long now = jiffies;
1547	unsigned long phsize;
1548
1549	INIT_LIST_HEAD(&tbl->parms_list);
1550	list_add(&tbl->parms.list, &tbl->parms_list);
1551	write_pnet(&tbl->parms.net, &init_net);
1552	refcount_set(&tbl->parms.refcnt, 1);
1553	tbl->parms.reachable_time =
1554			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1555
1556	tbl->stats = alloc_percpu(struct neigh_statistics);
1557	if (!tbl->stats)
1558		panic("cannot create neighbour cache statistics");
1559
1560#ifdef CONFIG_PROC_FS
1561	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1562			      &neigh_stat_seq_fops, tbl))
1563		panic("cannot create neighbour proc dir entry");
1564#endif
1565
1566	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1567
1568	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1569	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1570
1571	if (!tbl->nht || !tbl->phash_buckets)
1572		panic("cannot allocate neighbour cache hashes");
1573
1574	if (!tbl->entry_size)
1575		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1576					tbl->key_len, NEIGH_PRIV_ALIGN);
1577	else
1578		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1579
1580	rwlock_init(&tbl->lock);
1581	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1582	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1583			tbl->parms.reachable_time);
1584	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1585	skb_queue_head_init_class(&tbl->proxy_queue,
1586			&neigh_table_proxy_queue_class);
1587
1588	tbl->last_flush = now;
1589	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1590
1591	neigh_tables[index] = tbl;
1592}
1593EXPORT_SYMBOL(neigh_table_init);
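/* Protocols register their table once at init time under a fixed index;
 * for illustration (the call sites live in the protocol code, not here),
 * ARP does roughly
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and IPv6 neighbour discovery uses NEIGH_ND_TABLE.  The index is what
 * neigh_find_table() below maps address families onto.
 */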
1594
1595int neigh_table_clear(int index, struct neigh_table *tbl)
1596{
1597	neigh_tables[index] = NULL;
1598	/* It is not clean... Fix it to unload IPv6 module safely */
1599	cancel_delayed_work_sync(&tbl->gc_work);
1600	del_timer_sync(&tbl->proxy_timer);
1601	pneigh_queue_purge(&tbl->proxy_queue);
1602	neigh_ifdown(tbl, NULL);
1603	if (atomic_read(&tbl->entries))
1604		pr_crit("neighbour leakage\n");
1605
1606	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1607		 neigh_hash_free_rcu);
1608	tbl->nht = NULL;
1609
1610	kfree(tbl->phash_buckets);
1611	tbl->phash_buckets = NULL;
1612
1613	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1614
1615	free_percpu(tbl->stats);
1616	tbl->stats = NULL;
1617
1618	return 0;
1619}
1620EXPORT_SYMBOL(neigh_table_clear);
1621
1622static struct neigh_table *neigh_find_table(int family)
1623{
1624	struct neigh_table *tbl = NULL;
1625
1626	switch (family) {
1627	case AF_INET:
1628		tbl = neigh_tables[NEIGH_ARP_TABLE];
1629		break;
1630	case AF_INET6:
1631		tbl = neigh_tables[NEIGH_ND_TABLE];
1632		break;
1633	case AF_DECnet:
1634		tbl = neigh_tables[NEIGH_DN_TABLE];
1635		break;
1636	}
1637
1638	return tbl;
1639}
1640
1641static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1642			struct netlink_ext_ack *extack)
1643{
1644	struct net *net = sock_net(skb->sk);
1645	struct ndmsg *ndm;
1646	struct nlattr *dst_attr;
1647	struct neigh_table *tbl;
1648	struct neighbour *neigh;
1649	struct net_device *dev = NULL;
1650	int err = -EINVAL;
1651
1652	ASSERT_RTNL();
1653	if (nlmsg_len(nlh) < sizeof(*ndm))
1654		goto out;
1655
1656	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1657	if (dst_attr == NULL)
1658		goto out;
1659
1660	ndm = nlmsg_data(nlh);
1661	if (ndm->ndm_ifindex) {
1662		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1663		if (dev == NULL) {
1664			err = -ENODEV;
1665			goto out;
1666		}
1667	}
1668
1669	tbl = neigh_find_table(ndm->ndm_family);
1670	if (tbl == NULL)
1671		return -EAFNOSUPPORT;
1672
1673	if (nla_len(dst_attr) < (int)tbl->key_len)
1674		goto out;
1675
1676	if (ndm->ndm_flags & NTF_PROXY) {
1677		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1678		goto out;
1679	}
1680
1681	if (dev == NULL)
1682		goto out;
1683
1684	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1685	if (neigh == NULL) {
1686		err = -ENOENT;
1687		goto out;
1688	}
1689
1690	err = neigh_update(neigh, NULL, NUD_FAILED,
1691			   NEIGH_UPDATE_F_OVERRIDE |
1692			   NEIGH_UPDATE_F_ADMIN,
1693			   NETLINK_CB(skb).portid);
1694	write_lock_bh(&tbl->lock);
1695	neigh_release(neigh);
1696	neigh_remove_one(neigh, tbl);
1697	write_unlock_bh(&tbl->lock);
1698
1699out:
1700	return err;
1701}
1702
1703static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1704		     struct netlink_ext_ack *extack)
1705{
1706	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1707	struct net *net = sock_net(skb->sk);
1708	struct ndmsg *ndm;
1709	struct nlattr *tb[NDA_MAX+1];
1710	struct neigh_table *tbl;
1711	struct net_device *dev = NULL;
1712	struct neighbour *neigh;
1713	void *dst, *lladdr;
1714	int err;
1715
1716	ASSERT_RTNL();
1717	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
1718	if (err < 0)
1719		goto out;
1720
1721	err = -EINVAL;
1722	if (tb[NDA_DST] == NULL)
1723		goto out;
1724
1725	ndm = nlmsg_data(nlh);
1726	if (ndm->ndm_ifindex) {
1727		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1728		if (dev == NULL) {
1729			err = -ENODEV;
1730			goto out;
1731		}
1732
1733		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1734			goto out;
1735	}
1736
1737	tbl = neigh_find_table(ndm->ndm_family);
1738	if (tbl == NULL)
1739		return -EAFNOSUPPORT;
1740
1741	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
1742		goto out;
1743	dst = nla_data(tb[NDA_DST]);
1744	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1745
1746	if (ndm->ndm_flags & NTF_PROXY) {
1747		struct pneigh_entry *pn;
1748
1749		err = -ENOBUFS;
1750		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1751		if (pn) {
1752			pn->flags = ndm->ndm_flags;
1753			err = 0;
1754		}
1755		goto out;
1756	}
1757
1758	if (dev == NULL)
1759		goto out;
1760
1761	neigh = neigh_lookup(tbl, dst, dev);
1762	if (neigh == NULL) {
1763		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1764			err = -ENOENT;
1765			goto out;
1766		}
1767
1768		neigh = __neigh_lookup_errno(tbl, dst, dev);
1769		if (IS_ERR(neigh)) {
1770			err = PTR_ERR(neigh);
1771			goto out;
1772		}
1773	} else {
1774		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1775			err = -EEXIST;
1776			neigh_release(neigh);
1777			goto out;
1778		}
1779
1780		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1781			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1782	}
1783
1784	if (ndm->ndm_flags & NTF_USE) {
1785		neigh_event_send(neigh, NULL);
1786		err = 0;
1787	} else
1788		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1789				   NETLINK_CB(skb).portid);
1790	neigh_release(neigh);
1791
1792out:
1793	return err;
1794}
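/* neigh_delete() and neigh_add() above back the RTM_DELNEIGH and
 * RTM_NEWNEIGH requests, i.e. what iproute2 sends for commands such as
 * (illustrative)
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0
 *
 * which arrives roughly as NDA_DST + NDA_LLADDR with
 * NLM_F_CREATE|NLM_F_REPLACE set and ends in neigh_update() with
 * NEIGH_UPDATE_F_ADMIN|NEIGH_UPDATE_F_OVERRIDE.
 */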
1795
1796static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1797{
1798	struct nlattr *nest;
1799
1800	nest = nla_nest_start(skb, NDTA_PARMS);
1801	if (nest == NULL)
1802		return -ENOBUFS;
1803
1804	if ((parms->dev &&
1805	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1806	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1807	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1808			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
 1809	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1810	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1811			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1812	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1813	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1814	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1815			NEIGH_VAR(parms, UCAST_PROBES)) ||
1816	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1817			NEIGH_VAR(parms, MCAST_PROBES)) ||
1818	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1819			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1820	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1821			  NDTPA_PAD) ||
1822	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1823			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1824	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1825			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1826	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1827			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1828	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1829			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1830	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1831			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1832	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1833			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1834	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1835			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1836		goto nla_put_failure;
1837	return nla_nest_end(skb, nest);
1838
1839nla_put_failure:
1840	nla_nest_cancel(skb, nest);
1841	return -EMSGSIZE;
1842}
1843
1844static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1845			      u32 pid, u32 seq, int type, int flags)
1846{
1847	struct nlmsghdr *nlh;
1848	struct ndtmsg *ndtmsg;
1849
1850	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1851	if (nlh == NULL)
1852		return -EMSGSIZE;
1853
1854	ndtmsg = nlmsg_data(nlh);
1855
1856	read_lock_bh(&tbl->lock);
1857	ndtmsg->ndtm_family = tbl->family;
1858	ndtmsg->ndtm_pad1   = 0;
1859	ndtmsg->ndtm_pad2   = 0;
1860
1861	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1862	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1863	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1864	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1865	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1866		goto nla_put_failure;
1867	{
1868		unsigned long now = jiffies;
1869		unsigned int flush_delta = now - tbl->last_flush;
1870		unsigned int rand_delta = now - tbl->last_rand;
1871		struct neigh_hash_table *nht;
1872		struct ndt_config ndc = {
1873			.ndtc_key_len		= tbl->key_len,
1874			.ndtc_entry_size	= tbl->entry_size,
1875			.ndtc_entries		= atomic_read(&tbl->entries),
1876			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1877			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1878			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1879		};
1880
1881		rcu_read_lock_bh();
1882		nht = rcu_dereference_bh(tbl->nht);
1883		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1884		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1885		rcu_read_unlock_bh();
1886
1887		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1888			goto nla_put_failure;
1889	}
1890
1891	{
1892		int cpu;
1893		struct ndt_stats ndst;
1894
1895		memset(&ndst, 0, sizeof(ndst));
1896
1897		for_each_possible_cpu(cpu) {
1898			struct neigh_statistics	*st;
1899
1900			st = per_cpu_ptr(tbl->stats, cpu);
1901			ndst.ndts_allocs		+= st->allocs;
1902			ndst.ndts_destroys		+= st->destroys;
1903			ndst.ndts_hash_grows		+= st->hash_grows;
1904			ndst.ndts_res_failed		+= st->res_failed;
1905			ndst.ndts_lookups		+= st->lookups;
1906			ndst.ndts_hits			+= st->hits;
1907			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1908			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1909			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1910			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1911			ndst.ndts_table_fulls		+= st->table_fulls;
1912		}
1913
1914		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1915				  NDTA_PAD))
1916			goto nla_put_failure;
1917	}
1918
1919	BUG_ON(tbl->parms.dev);
1920	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1921		goto nla_put_failure;
1922
1923	read_unlock_bh(&tbl->lock);
1924	nlmsg_end(skb, nlh);
1925	return 0;
1926
1927nla_put_failure:
1928	read_unlock_bh(&tbl->lock);
1929	nlmsg_cancel(skb, nlh);
1930	return -EMSGSIZE;
1931}
1932
1933static int neightbl_fill_param_info(struct sk_buff *skb,
1934				    struct neigh_table *tbl,
1935				    struct neigh_parms *parms,
1936				    u32 pid, u32 seq, int type,
1937				    unsigned int flags)
1938{
1939	struct ndtmsg *ndtmsg;
1940	struct nlmsghdr *nlh;
1941
1942	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1943	if (nlh == NULL)
1944		return -EMSGSIZE;
1945
1946	ndtmsg = nlmsg_data(nlh);
1947
1948	read_lock_bh(&tbl->lock);
1949	ndtmsg->ndtm_family = tbl->family;
1950	ndtmsg->ndtm_pad1   = 0;
1951	ndtmsg->ndtm_pad2   = 0;
1952
1953	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1954	    neightbl_fill_parms(skb, parms) < 0)
1955		goto errout;
1956
1957	read_unlock_bh(&tbl->lock);
1958	nlmsg_end(skb, nlh);
1959	return 0;
1960errout:
1961	read_unlock_bh(&tbl->lock);
1962	nlmsg_cancel(skb, nlh);
1963	return -EMSGSIZE;
1964}
1965
1966static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1967	[NDTA_NAME]		= { .type = NLA_STRING },
1968	[NDTA_THRESH1]		= { .type = NLA_U32 },
1969	[NDTA_THRESH2]		= { .type = NLA_U32 },
1970	[NDTA_THRESH3]		= { .type = NLA_U32 },
1971	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1972	[NDTA_PARMS]		= { .type = NLA_NESTED },
1973};
1974
1975static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1976	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1977	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1978	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1979	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1980	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1981	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1982	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1983	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1984	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1985	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1986	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1987	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1988	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1989	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1990};
1991
1992static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
1993			struct netlink_ext_ack *extack)
1994{
1995	struct net *net = sock_net(skb->sk);
1996	struct neigh_table *tbl;
1997	struct ndtmsg *ndtmsg;
1998	struct nlattr *tb[NDTA_MAX+1];
1999	bool found = false;
2000	int err, tidx;
2001
2002	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2003			  nl_neightbl_policy, extack);
2004	if (err < 0)
2005		goto errout;
2006
2007	if (tb[NDTA_NAME] == NULL) {
2008		err = -EINVAL;
2009		goto errout;
2010	}
2011
2012	ndtmsg = nlmsg_data(nlh);
2013
2014	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2015		tbl = neigh_tables[tidx];
2016		if (!tbl)
2017			continue;
2018		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2019			continue;
2020		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2021			found = true;
2022			break;
2023		}
2024	}
2025
2026	if (!found)
2027		return -ENOENT;
2028
2029	/*
2030	 * We acquire tbl->lock to be nice to the periodic timers and
2031	 * make sure they always see a consistent set of values.
2032	 */
2033	write_lock_bh(&tbl->lock);
2034
2035	if (tb[NDTA_PARMS]) {
2036		struct nlattr *tbp[NDTPA_MAX+1];
2037		struct neigh_parms *p;
2038		int i, ifindex = 0;
2039
2040		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2041				       nl_ntbl_parm_policy, extack);
2042		if (err < 0)
2043			goto errout_tbl_lock;
2044
2045		if (tbp[NDTPA_IFINDEX])
2046			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2047
2048		p = lookup_neigh_parms(tbl, net, ifindex);
2049		if (p == NULL) {
2050			err = -ENOENT;
2051			goto errout_tbl_lock;
2052		}
2053
2054		for (i = 1; i <= NDTPA_MAX; i++) {
2055			if (tbp[i] == NULL)
2056				continue;
2057
2058			switch (i) {
2059			case NDTPA_QUEUE_LEN:
2060				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2061					      nla_get_u32(tbp[i]) *
2062					      SKB_TRUESIZE(ETH_FRAME_LEN));
2063				break;
2064			case NDTPA_QUEUE_LENBYTES:
2065				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2066					      nla_get_u32(tbp[i]));
2067				break;
2068			case NDTPA_PROXY_QLEN:
2069				NEIGH_VAR_SET(p, PROXY_QLEN,
2070					      nla_get_u32(tbp[i]));
2071				break;
2072			case NDTPA_APP_PROBES:
2073				NEIGH_VAR_SET(p, APP_PROBES,
2074					      nla_get_u32(tbp[i]));
2075				break;
2076			case NDTPA_UCAST_PROBES:
2077				NEIGH_VAR_SET(p, UCAST_PROBES,
2078					      nla_get_u32(tbp[i]));
2079				break;
2080			case NDTPA_MCAST_PROBES:
2081				NEIGH_VAR_SET(p, MCAST_PROBES,
2082					      nla_get_u32(tbp[i]));
2083				break;
2084			case NDTPA_MCAST_REPROBES:
2085				NEIGH_VAR_SET(p, MCAST_REPROBES,
2086					      nla_get_u32(tbp[i]));
2087				break;
2088			case NDTPA_BASE_REACHABLE_TIME:
2089				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2090					      nla_get_msecs(tbp[i]));
 2091				/* update reachable_time as well; otherwise the change
 2092				 * only takes effect the next time neigh_periodic_work
 2093				 * decides to recompute it (which can be several minutes away)
 2094				 */
2095				p->reachable_time =
2096					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2097				break;
2098			case NDTPA_GC_STALETIME:
2099				NEIGH_VAR_SET(p, GC_STALETIME,
2100					      nla_get_msecs(tbp[i]));
2101				break;
2102			case NDTPA_DELAY_PROBE_TIME:
2103				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2104					      nla_get_msecs(tbp[i]));
2105				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2106				break;
2107			case NDTPA_RETRANS_TIME:
2108				NEIGH_VAR_SET(p, RETRANS_TIME,
2109					      nla_get_msecs(tbp[i]));
2110				break;
2111			case NDTPA_ANYCAST_DELAY:
2112				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2113					      nla_get_msecs(tbp[i]));
2114				break;
2115			case NDTPA_PROXY_DELAY:
2116				NEIGH_VAR_SET(p, PROXY_DELAY,
2117					      nla_get_msecs(tbp[i]));
2118				break;
2119			case NDTPA_LOCKTIME:
2120				NEIGH_VAR_SET(p, LOCKTIME,
2121					      nla_get_msecs(tbp[i]));
2122				break;
2123			}
2124		}
2125	}
2126
2127	err = -ENOENT;
2128	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2129	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2130	    !net_eq(net, &init_net))
2131		goto errout_tbl_lock;
2132
2133	if (tb[NDTA_THRESH1])
2134		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2135
2136	if (tb[NDTA_THRESH2])
2137		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2138
2139	if (tb[NDTA_THRESH3])
2140		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2141
2142	if (tb[NDTA_GC_INTERVAL])
2143		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2144
2145	err = 0;
2146
2147errout_tbl_lock:
2148	write_unlock_bh(&tbl->lock);
2149errout:
2150	return err;
2151}
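/* neightbl_set() services RTM_SETNEIGHTBL, the request behind
 * "ip ntable change ..."-style tuning: per-device parameters arrive nested
 * under NDTA_PARMS, while gc_thresh1..3 and gc_interval are global knobs
 * and, as checked above, may only be modified from the initial namespace.
 */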
2152
2153static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2154{
2155	struct net *net = sock_net(skb->sk);
2156	int family, tidx, nidx = 0;
2157	int tbl_skip = cb->args[0];
2158	int neigh_skip = cb->args[1];
2159	struct neigh_table *tbl;
2160
2161	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2162
2163	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2164		struct neigh_parms *p;
2165
2166		tbl = neigh_tables[tidx];
2167		if (!tbl)
2168			continue;
2169
2170		if (tidx < tbl_skip || (family && tbl->family != family))
2171			continue;
2172
2173		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2174				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2175				       NLM_F_MULTI) < 0)
2176			break;
2177
2178		nidx = 0;
2179		p = list_next_entry(&tbl->parms, list);
2180		list_for_each_entry_from(p, &tbl->parms_list, list) {
2181			if (!net_eq(neigh_parms_net(p), net))
2182				continue;
2183
2184			if (nidx < neigh_skip)
2185				goto next;
2186
2187			if (neightbl_fill_param_info(skb, tbl, p,
2188						     NETLINK_CB(cb->skb).portid,
2189						     cb->nlh->nlmsg_seq,
2190						     RTM_NEWNEIGHTBL,
2191						     NLM_F_MULTI) < 0)
2192				goto out;
2193		next:
2194			nidx++;
2195		}
2196
2197		neigh_skip = 0;
2198	}
2199out:
2200	cb->args[0] = tidx;
2201	cb->args[1] = nidx;
2202
2203	return skb->len;
2204}
2205
2206static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2207			   u32 pid, u32 seq, int type, unsigned int flags)
2208{
2209	unsigned long now = jiffies;
2210	struct nda_cacheinfo ci;
2211	struct nlmsghdr *nlh;
2212	struct ndmsg *ndm;
2213
2214	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2215	if (nlh == NULL)
2216		return -EMSGSIZE;
2217
2218	ndm = nlmsg_data(nlh);
2219	ndm->ndm_family	 = neigh->ops->family;
2220	ndm->ndm_pad1    = 0;
2221	ndm->ndm_pad2    = 0;
2222	ndm->ndm_flags	 = neigh->flags;
2223	ndm->ndm_type	 = neigh->type;
2224	ndm->ndm_ifindex = neigh->dev->ifindex;
2225
2226	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2227		goto nla_put_failure;
2228
2229	read_lock_bh(&neigh->lock);
2230	ndm->ndm_state	 = neigh->nud_state;
2231	if (neigh->nud_state & NUD_VALID) {
2232		char haddr[MAX_ADDR_LEN];
2233
2234		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2235		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2236			read_unlock_bh(&neigh->lock);
2237			goto nla_put_failure;
2238		}
2239	}
2240
2241	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2242	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2243	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2244	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2245	read_unlock_bh(&neigh->lock);
2246
2247	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2248	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2249		goto nla_put_failure;
2250
2251	nlmsg_end(skb, nlh);
2252	return 0;
2253
2254nla_put_failure:
2255	nlmsg_cancel(skb, nlh);
2256	return -EMSGSIZE;
2257}
2258
2259static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2260			    u32 pid, u32 seq, int type, unsigned int flags,
2261			    struct neigh_table *tbl)
2262{
2263	struct nlmsghdr *nlh;
2264	struct ndmsg *ndm;
2265
2266	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2267	if (nlh == NULL)
2268		return -EMSGSIZE;
2269
2270	ndm = nlmsg_data(nlh);
2271	ndm->ndm_family	 = tbl->family;
2272	ndm->ndm_pad1    = 0;
2273	ndm->ndm_pad2    = 0;
2274	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2275	ndm->ndm_type	 = RTN_UNICAST;
2276	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2277	ndm->ndm_state	 = NUD_NONE;
2278
2279	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2280		goto nla_put_failure;
2281
2282	nlmsg_end(skb, nlh);
2283	return 0;
2284
2285nla_put_failure:
2286	nlmsg_cancel(skb, nlh);
2287	return -EMSGSIZE;
2288}
2289
2290static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2291{
2292	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2293	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2294}
2295
2296static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2297{
2298	struct net_device *master;
2299
2300	if (!master_idx)
2301		return false;
2302
2303	master = netdev_master_upper_dev_get(dev);
2304	if (!master || master->ifindex != master_idx)
2305		return true;
2306
2307	return false;
2308}
2309
2310static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2311{
2312	if (filter_idx && dev->ifindex != filter_idx)
2313		return true;
2314
2315	return false;
2316}
2317
2318static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2319			    struct netlink_callback *cb)
2320{
2321	struct net *net = sock_net(skb->sk);
2322	const struct nlmsghdr *nlh = cb->nlh;
2323	struct nlattr *tb[NDA_MAX + 1];
2324	struct neighbour *n;
2325	int rc, h, s_h = cb->args[1];
2326	int idx, s_idx = idx = cb->args[2];
2327	struct neigh_hash_table *nht;
2328	int filter_master_idx = 0, filter_idx = 0;
2329	unsigned int flags = NLM_F_MULTI;
2330	int err;
2331
2332	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2333	if (!err) {
2334		if (tb[NDA_IFINDEX]) {
2335			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2336				return -EINVAL;
2337			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2338		}
2339		if (tb[NDA_MASTER]) {
2340			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2341				return -EINVAL;
2342			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2343		}
2344		if (filter_idx || filter_master_idx)
2345			flags |= NLM_F_DUMP_FILTERED;
2346	}
2347
2348	rcu_read_lock_bh();
2349	nht = rcu_dereference_bh(tbl->nht);
2350
2351	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2352		if (h > s_h)
2353			s_idx = 0;
2354		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2355		     n != NULL;
2356		     n = rcu_dereference_bh(n->next)) {
2357			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2358				goto next;
2359			if (neigh_ifindex_filtered(n->dev, filter_idx) ||
2360			    neigh_master_filtered(n->dev, filter_master_idx))
2361				goto next;
2362			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2363					    cb->nlh->nlmsg_seq,
2364					    RTM_NEWNEIGH,
2365					    flags) < 0) {
2366				rc = -1;
2367				goto out;
2368			}
2369next:
2370			idx++;
2371		}
2372	}
2373	rc = skb->len;
2374out:
2375	rcu_read_unlock_bh();
2376	cb->args[1] = h;
2377	cb->args[2] = idx;
2378	return rc;
2379}
2380
2381static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2382			     struct netlink_callback *cb)
2383{
2384	struct pneigh_entry *n;
2385	struct net *net = sock_net(skb->sk);
2386	int rc, h, s_h = cb->args[3];
2387	int idx, s_idx = idx = cb->args[4];
2388
2389	read_lock_bh(&tbl->lock);
2390
2391	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2392		if (h > s_h)
2393			s_idx = 0;
2394		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2395			if (idx < s_idx || pneigh_net(n) != net)
2396				goto next;
2397			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2398					    cb->nlh->nlmsg_seq,
2399					    RTM_NEWNEIGH,
2400					    NLM_F_MULTI, tbl) < 0) {
2401				read_unlock_bh(&tbl->lock);
2402				rc = -1;
2403				goto out;
2404			}
2405		next:
2406			idx++;
2407		}
2408	}
2409
2410	read_unlock_bh(&tbl->lock);
2411	rc = skb->len;
2412out:
2413	cb->args[3] = h;
2414	cb->args[4] = idx;
2415	return rc;
2416
2417}
2418
2419static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2420{
2421	struct neigh_table *tbl;
2422	int t, family, s_t;
2423	int proxy = 0;
2424	int err;
2425
2426	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2427
 2428	/* check whether a full ndmsg structure is present; the family member
 2429	 * sits at the same offset in both structures
 2430	 */
2431	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2432	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2433		proxy = 1;
2434
2435	s_t = cb->args[0];
2436
2437	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2438		tbl = neigh_tables[t];
2439
2440		if (!tbl)
2441			continue;
2442		if (t < s_t || (family && tbl->family != family))
2443			continue;
2444		if (t > s_t)
2445			memset(&cb->args[1], 0, sizeof(cb->args) -
2446						sizeof(cb->args[0]));
2447		if (proxy)
2448			err = pneigh_dump_table(tbl, skb, cb);
2449		else
2450			err = neigh_dump_table(tbl, skb, cb);
2451		if (err < 0)
2452			break;
2453	}
2454
2455	cb->args[0] = t;
2456	return skb->len;
2457}
2458
2459void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2460{
2461	int chain;
2462	struct neigh_hash_table *nht;
2463
2464	rcu_read_lock_bh();
2465	nht = rcu_dereference_bh(tbl->nht);
2466
2467	read_lock(&tbl->lock); /* avoid resizes */
2468	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2469		struct neighbour *n;
2470
2471		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2472		     n != NULL;
2473		     n = rcu_dereference_bh(n->next))
2474			cb(n, cookie);
2475	}
2476	read_unlock(&tbl->lock);
2477	rcu_read_unlock_bh();
2478}
2479EXPORT_SYMBOL(neigh_for_each);
2480
2481/* The tbl->lock must be held as a writer and BH disabled. */
2482void __neigh_for_each_release(struct neigh_table *tbl,
2483			      int (*cb)(struct neighbour *))
2484{
2485	int chain;
2486	struct neigh_hash_table *nht;
2487
2488	nht = rcu_dereference_protected(tbl->nht,
2489					lockdep_is_held(&tbl->lock));
2490	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2491		struct neighbour *n;
2492		struct neighbour __rcu **np;
2493
2494		np = &nht->hash_buckets[chain];
2495		while ((n = rcu_dereference_protected(*np,
2496					lockdep_is_held(&tbl->lock))) != NULL) {
2497			int release;
2498
2499			write_lock(&n->lock);
2500			release = cb(n);
2501			if (release) {
2502				rcu_assign_pointer(*np,
2503					rcu_dereference_protected(n->next,
2504						lockdep_is_held(&tbl->lock)));
2505				n->dead = 1;
2506			} else
2507				np = &n->next;
2508			write_unlock(&n->lock);
2509			if (release)
2510				neigh_cleanup_and_release(n);
2511		}
2512	}
2513}
2514EXPORT_SYMBOL(__neigh_for_each_release);
2515
2516int neigh_xmit(int index, struct net_device *dev,
2517	       const void *addr, struct sk_buff *skb)
2518{
2519	int err = -EAFNOSUPPORT;
2520	if (likely(index < NEIGH_NR_TABLES)) {
2521		struct neigh_table *tbl;
2522		struct neighbour *neigh;
2523
2524		tbl = neigh_tables[index];
2525		if (!tbl)
2526			goto out;
2527		rcu_read_lock_bh();
2528		neigh = __neigh_lookup_noref(tbl, addr, dev);
2529		if (!neigh)
2530			neigh = __neigh_create(tbl, addr, dev, false);
2531		err = PTR_ERR(neigh);
2532		if (IS_ERR(neigh)) {
2533			rcu_read_unlock_bh();
2534			goto out_kfree_skb;
2535		}
2536		err = neigh->output(neigh, skb);
2537		rcu_read_unlock_bh();
2538	}
2539	else if (index == NEIGH_LINK_TABLE) {
2540		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2541				      addr, NULL, skb->len);
2542		if (err < 0)
2543			goto out_kfree_skb;
2544		err = dev_queue_xmit(skb);
2545	}
2546out:
2547	return err;
2548out_kfree_skb:
2549	kfree_skb(skb);
2550	goto out;
2551}
2552EXPORT_SYMBOL(neigh_xmit);
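/* neigh_xmit() is for callers that only have a table index and a
 * destination address rather than a cached neighbour: for the protocol
 * tables it looks up or creates an entry and invokes its ->output(),
 * while NEIGH_LINK_TABLE bypasses the cache entirely and builds the
 * link-layer header straight from the supplied address.
 */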
2553
2554#ifdef CONFIG_PROC_FS
2555
2556static struct neighbour *neigh_get_first(struct seq_file *seq)
2557{
2558	struct neigh_seq_state *state = seq->private;
2559	struct net *net = seq_file_net(seq);
2560	struct neigh_hash_table *nht = state->nht;
2561	struct neighbour *n = NULL;
2562	int bucket = state->bucket;
2563
2564	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2565	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2566		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2567
2568		while (n) {
2569			if (!net_eq(dev_net(n->dev), net))
2570				goto next;
2571			if (state->neigh_sub_iter) {
2572				loff_t fakep = 0;
2573				void *v;
2574
2575				v = state->neigh_sub_iter(state, n, &fakep);
2576				if (!v)
2577					goto next;
2578			}
2579			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2580				break;
2581			if (n->nud_state & ~NUD_NOARP)
2582				break;
2583next:
2584			n = rcu_dereference_bh(n->next);
2585		}
2586
2587		if (n)
2588			break;
2589	}
2590	state->bucket = bucket;
2591
2592	return n;
2593}
2594
2595static struct neighbour *neigh_get_next(struct seq_file *seq,
2596					struct neighbour *n,
2597					loff_t *pos)
2598{
2599	struct neigh_seq_state *state = seq->private;
2600	struct net *net = seq_file_net(seq);
2601	struct neigh_hash_table *nht = state->nht;
2602
2603	if (state->neigh_sub_iter) {
2604		void *v = state->neigh_sub_iter(state, n, pos);
2605		if (v)
2606			return n;
2607	}
2608	n = rcu_dereference_bh(n->next);
2609
2610	while (1) {
2611		while (n) {
2612			if (!net_eq(dev_net(n->dev), net))
2613				goto next;
2614			if (state->neigh_sub_iter) {
2615				void *v = state->neigh_sub_iter(state, n, pos);
2616				if (v)
2617					return n;
2618				goto next;
2619			}
2620			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2621				break;
2622
2623			if (n->nud_state & ~NUD_NOARP)
2624				break;
2625next:
2626			n = rcu_dereference_bh(n->next);
2627		}
2628
2629		if (n)
2630			break;
2631
2632		if (++state->bucket >= (1 << nht->hash_shift))
2633			break;
2634
2635		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2636	}
2637
2638	if (n && pos)
2639		--(*pos);
2640	return n;
2641}
2642
2643static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2644{
2645	struct neighbour *n = neigh_get_first(seq);
2646
2647	if (n) {
2648		--(*pos);
2649		while (*pos) {
2650			n = neigh_get_next(seq, n, pos);
2651			if (!n)
2652				break;
2653		}
2654	}
2655	return *pos ? NULL : n;
2656}
2657
2658static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2659{
2660	struct neigh_seq_state *state = seq->private;
2661	struct net *net = seq_file_net(seq);
2662	struct neigh_table *tbl = state->tbl;
2663	struct pneigh_entry *pn = NULL;
2664	int bucket = state->bucket;
2665
2666	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2667	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2668		pn = tbl->phash_buckets[bucket];
2669		while (pn && !net_eq(pneigh_net(pn), net))
2670			pn = pn->next;
2671		if (pn)
2672			break;
2673	}
2674	state->bucket = bucket;
2675
2676	return pn;
2677}
2678
2679static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2680					    struct pneigh_entry *pn,
2681					    loff_t *pos)
2682{
2683	struct neigh_seq_state *state = seq->private;
2684	struct net *net = seq_file_net(seq);
2685	struct neigh_table *tbl = state->tbl;
2686
2687	do {
2688		pn = pn->next;
2689	} while (pn && !net_eq(pneigh_net(pn), net));
2690
2691	while (!pn) {
2692		if (++state->bucket > PNEIGH_HASHMASK)
2693			break;
2694		pn = tbl->phash_buckets[state->bucket];
2695		while (pn && !net_eq(pneigh_net(pn), net))
2696			pn = pn->next;
2697		if (pn)
2698			break;
2699	}
2700
2701	if (pn && pos)
2702		--(*pos);
2703
2704	return pn;
2705}
2706
2707static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2708{
2709	struct pneigh_entry *pn = pneigh_get_first(seq);
2710
2711	if (pn) {
2712		--(*pos);
2713		while (*pos) {
2714			pn = pneigh_get_next(seq, pn, pos);
2715			if (!pn)
2716				break;
2717		}
2718	}
2719	return *pos ? NULL : pn;
2720}
2721
2722static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2723{
2724	struct neigh_seq_state *state = seq->private;
2725	void *rc;
2726	loff_t idxpos = *pos;
2727
2728	rc = neigh_get_idx(seq, &idxpos);
2729	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2730		rc = pneigh_get_idx(seq, &idxpos);
2731
2732	return rc;
2733}
2734
2735void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2736	__acquires(rcu_bh)
2737{
2738	struct neigh_seq_state *state = seq->private;
2739
2740	state->tbl = tbl;
2741	state->bucket = 0;
2742	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2743
2744	rcu_read_lock_bh();
2745	state->nht = rcu_dereference_bh(tbl->nht);
2746
2747	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2748}
2749EXPORT_SYMBOL(neigh_seq_start);
2750
2751void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2752{
2753	struct neigh_seq_state *state;
2754	void *rc;
2755
2756	if (v == SEQ_START_TOKEN) {
2757		rc = neigh_get_first(seq);
2758		goto out;
2759	}
2760
2761	state = seq->private;
2762	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2763		rc = neigh_get_next(seq, v, NULL);
2764		if (rc)
2765			goto out;
2766		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2767			rc = pneigh_get_first(seq);
2768	} else {
2769		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2770		rc = pneigh_get_next(seq, v, NULL);
2771	}
2772out:
2773	++(*pos);
2774	return rc;
2775}
2776EXPORT_SYMBOL(neigh_seq_next);
2777
2778void neigh_seq_stop(struct seq_file *seq, void *v)
2779	__releases(rcu_bh)
2780{
2781	rcu_read_unlock_bh();
2782}
2783EXPORT_SYMBOL(neigh_seq_stop);
2784
2785/* statistics via seq_file */
2786
2787static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2788{
2789	struct neigh_table *tbl = seq->private;
2790	int cpu;
2791
2792	if (*pos == 0)
2793		return SEQ_START_TOKEN;
2794
2795	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2796		if (!cpu_possible(cpu))
2797			continue;
2798		*pos = cpu+1;
2799		return per_cpu_ptr(tbl->stats, cpu);
2800	}
2801	return NULL;
2802}
2803
2804static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2805{
2806	struct neigh_table *tbl = seq->private;
2807	int cpu;
2808
2809	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2810		if (!cpu_possible(cpu))
2811			continue;
2812		*pos = cpu+1;
2813		return per_cpu_ptr(tbl->stats, cpu);
2814	}
2815	return NULL;
2816}
2817
2818static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2819{
2820
2821}
2822
2823static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2824{
2825	struct neigh_table *tbl = seq->private;
2826	struct neigh_statistics *st = v;
2827
2828	if (v == SEQ_START_TOKEN) {
2829		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2830		return 0;
2831	}
2832
2833	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2834			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2835		   atomic_read(&tbl->entries),
2836
2837		   st->allocs,
2838		   st->destroys,
2839		   st->hash_grows,
2840
2841		   st->lookups,
2842		   st->hits,
2843
2844		   st->res_failed,
2845
2846		   st->rcv_probes_mcast,
2847		   st->rcv_probes_ucast,
2848
2849		   st->periodic_gc_runs,
2850		   st->forced_gc_runs,
2851		   st->unres_discards,
2852		   st->table_fulls
2853		   );
2854
2855	return 0;
2856}
2857
2858static const struct seq_operations neigh_stat_seq_ops = {
2859	.start	= neigh_stat_seq_start,
2860	.next	= neigh_stat_seq_next,
2861	.stop	= neigh_stat_seq_stop,
2862	.show	= neigh_stat_seq_show,
2863};
2864
2865static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2866{
2867	int ret = seq_open(file, &neigh_stat_seq_ops);
2868
2869	if (!ret) {
2870		struct seq_file *sf = file->private_data;
2871		sf->private = PDE_DATA(inode);
2872	}
2873	return ret;
2874};
2875
2876static const struct file_operations neigh_stat_seq_fops = {
2877	.open 	 = neigh_stat_seq_open,
2878	.read	 = seq_read,
2879	.llseek	 = seq_lseek,
2880	.release = seq_release,
2881};
2882
2883#endif /* CONFIG_PROC_FS */
2884
2885static inline size_t neigh_nlmsg_size(void)
2886{
2887	return NLMSG_ALIGN(sizeof(struct ndmsg))
2888	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2889	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2890	       + nla_total_size(sizeof(struct nda_cacheinfo))
2891	       + nla_total_size(4); /* NDA_PROBES */
2892}
2893
2894static void __neigh_notify(struct neighbour *n, int type, int flags,
2895			   u32 pid)
2896{
2897	struct net *net = dev_net(n->dev);
2898	struct sk_buff *skb;
2899	int err = -ENOBUFS;
2900
2901	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2902	if (skb == NULL)
2903		goto errout;
2904
2905	err = neigh_fill_info(skb, n, pid, 0, type, flags);
2906	if (err < 0) {
2907		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2908		WARN_ON(err == -EMSGSIZE);
2909		kfree_skb(skb);
2910		goto errout;
2911	}
2912	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2913	return;
2914errout:
2915	if (err < 0)
2916		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2917}
2918
2919void neigh_app_ns(struct neighbour *n)
2920{
2921	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
2922}
2923EXPORT_SYMBOL(neigh_app_ns);
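/* neigh_app_ns() backs the app_solicit/app_probes mechanism: instead of
 * (or in addition to) probing on the wire, the kernel multicasts an
 * RTM_GETNEIGH request to RTNLGRP_NEIGH so that a userspace resolver can
 * answer with RTM_NEWNEIGH and fill in the link-layer address.
 */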
2924
2925#ifdef CONFIG_SYSCTL
2926static int zero;
2927static int int_max = INT_MAX;
2928static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2929
2930static int proc_unres_qlen(struct ctl_table *ctl, int write,
2931			   void __user *buffer, size_t *lenp, loff_t *ppos)
2932{
2933	int size, ret;
2934	struct ctl_table tmp = *ctl;
2935
2936	tmp.extra1 = &zero;
2937	tmp.extra2 = &unres_qlen_max;
2938	tmp.data = &size;
2939
2940	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2941	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2942
2943	if (write && !ret)
2944		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2945	return ret;
2946}
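/* The queue limit is stored in bytes (QUEUE_LEN_BYTES), but the legacy
 * "unres_qlen" sysctl is expressed in packets, so reads divide and writes
 * multiply by SKB_TRUESIZE(ETH_FRAME_LEN), the true size of one worst-case
 * Ethernet frame; "unres_qlen_bytes" below exposes the exact byte value.
 */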
2947
2948static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2949						   int family)
2950{
2951	switch (family) {
2952	case AF_INET:
2953		return __in_dev_arp_parms_get_rcu(dev);
2954	case AF_INET6:
2955		return __in6_dev_nd_parms_get_rcu(dev);
2956	}
2957	return NULL;
2958}
2959
2960static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2961				  int index)
2962{
2963	struct net_device *dev;
2964	int family = neigh_parms_family(p);
2965
2966	rcu_read_lock();
2967	for_each_netdev_rcu(net, dev) {
2968		struct neigh_parms *dst_p =
2969				neigh_get_dev_parms_rcu(dev, family);
2970
2971		if (dst_p && !test_bit(index, dst_p->data_state))
2972			dst_p->data[index] = p->data[index];
2973	}
2974	rcu_read_unlock();
2975}
2976
2977static void neigh_proc_update(struct ctl_table *ctl, int write)
2978{
2979	struct net_device *dev = ctl->extra1;
2980	struct neigh_parms *p = ctl->extra2;
2981	struct net *net = neigh_parms_net(p);
2982	int index = (int *) ctl->data - p->data;
2983
2984	if (!write)
2985		return;
2986
2987	set_bit(index, p->data_state);
2988	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2989		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2990	if (!dev) /* NULL dev means this is default value */
2991		neigh_copy_dflt_parms(net, p, index);
2992}
2993
2994static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2995					   void __user *buffer,
2996					   size_t *lenp, loff_t *ppos)
2997{
2998	struct ctl_table tmp = *ctl;
2999	int ret;
3000
3001	tmp.extra1 = &zero;
3002	tmp.extra2 = &int_max;
3003
3004	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3005	neigh_proc_update(ctl, write);
3006	return ret;
3007}
3008
3009int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3010			void __user *buffer, size_t *lenp, loff_t *ppos)
3011{
3012	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3013
3014	neigh_proc_update(ctl, write);
3015	return ret;
3016}
3017EXPORT_SYMBOL(neigh_proc_dointvec);
3018
3019int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3020				void __user *buffer,
3021				size_t *lenp, loff_t *ppos)
3022{
3023	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3024
3025	neigh_proc_update(ctl, write);
3026	return ret;
3027}
3028EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3029
3030static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3031					      void __user *buffer,
3032					      size_t *lenp, loff_t *ppos)
3033{
3034	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3035
3036	neigh_proc_update(ctl, write);
3037	return ret;
3038}
3039
3040int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3041				   void __user *buffer,
3042				   size_t *lenp, loff_t *ppos)
3043{
3044	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3045
3046	neigh_proc_update(ctl, write);
3047	return ret;
3048}
3049EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3050
3051static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3052					  void __user *buffer,
3053					  size_t *lenp, loff_t *ppos)
3054{
3055	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3056
3057	neigh_proc_update(ctl, write);
3058	return ret;
3059}
3060
3061static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3062					  void __user *buffer,
3063					  size_t *lenp, loff_t *ppos)
3064{
3065	struct neigh_parms *p = ctl->extra2;
3066	int ret;
3067
3068	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3069		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3070	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3071		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3072	else
3073		ret = -1;
3074
3075	if (write && ret == 0) {
 3076		/* update reachable_time as well; otherwise the change
 3077		 * only takes effect the next time neigh_periodic_work
 3078		 * decides to recompute it
 3079		 */
3080		p->reachable_time =
3081			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3082	}
3083	return ret;
3084}
3085
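    /* offsetof()-style trick: the template entries below store the offset of
     * data[index] within struct neigh_parms in .data; neigh_sysctl_register()
     * turns it into a real pointer by adding the address of the parms block.
     */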
3086#define NEIGH_PARMS_DATA_OFFSET(index)	\
3087	(&((struct neigh_parms *) 0)->data[index])
3088
3089#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3090	[NEIGH_VAR_ ## attr] = { \
3091		.procname	= name, \
3092		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3093		.maxlen		= sizeof(int), \
3094		.mode		= mval, \
3095		.proc_handler	= proc, \
3096	}
3097
3098#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3099	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3100
3101#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3102	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3103
3104#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3105	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3106
3107#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3108	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3109
3110#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3111	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3112
3113#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3114	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3115
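    /* Template table, kmemdup()'d on every registration.  The trailing GC
     * entries are only valid for the per-table "default" directory and are
     * chopped off for per-device registrations.
     */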
3116static struct neigh_sysctl_table {
3117	struct ctl_table_header *sysctl_header;
3118	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3119} neigh_sysctl_template __read_mostly = {
3120	.neigh_vars = {
3121		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3122		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3123		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3124		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3125		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3126		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3127		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3128		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3129		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3130		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3131		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3132		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3133		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3134		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3135		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3136		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3137		[NEIGH_VAR_GC_INTERVAL] = {
3138			.procname	= "gc_interval",
3139			.maxlen		= sizeof(int),
3140			.mode		= 0644,
3141			.proc_handler	= proc_dointvec_jiffies,
3142		},
3143		[NEIGH_VAR_GC_THRESH1] = {
3144			.procname	= "gc_thresh1",
3145			.maxlen		= sizeof(int),
3146			.mode		= 0644,
3147			.extra1		= &zero,
3148			.extra2		= &int_max,
3149			.proc_handler	= proc_dointvec_minmax,
3150		},
3151		[NEIGH_VAR_GC_THRESH2] = {
3152			.procname	= "gc_thresh2",
3153			.maxlen		= sizeof(int),
3154			.mode		= 0644,
3155			.extra1		= &zero,
3156			.extra2		= &int_max,
3157			.proc_handler	= proc_dointvec_minmax,
3158		},
3159		[NEIGH_VAR_GC_THRESH3] = {
3160			.procname	= "gc_thresh3",
3161			.maxlen		= sizeof(int),
3162			.mode		= 0644,
3163			.extra1		= &zero,
3164			.extra2		= &int_max,
3165			.proc_handler	= proc_dointvec_minmax,
3166		},
3167		{},
3168	},
3169};
3170
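    /* Create net/<ipv4|ipv6>/neigh/<devname|default>/ for one neigh_parms
     * instance: duplicate the template, rebase every .data pointer into @p,
     * stash @dev and @p in extra1/extra2 for neigh_proc_update(), and let
     * callers outside this file (the IPv6 ndisc code, for example) override
     * the time-related handlers via @handler.
     */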
3171int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3172			  proc_handler *handler)
3173{
3174	int i;
3175	struct neigh_sysctl_table *t;
3176	const char *dev_name_source;
3177	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3178	char *p_name;
3179
3180	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3181	if (!t)
3182		goto err;
3183
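    	/* Rebase the offset-encoded .data pointers onto this parms instance
    	 * and carry @dev and @p to neigh_proc_update() via extra1/extra2.
    	 */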
3184	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3185		t->neigh_vars[i].data += (long) p;
3186		t->neigh_vars[i].extra1 = dev;
3187		t->neigh_vars[i].extra2 = p;
3188	}
3189
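    	/* The GC interval/thresholds live in the neigh_table, not in
    	 * neigh_parms: per-device directories are terminated before them,
    	 * while the "default" directory points them at the table's fields.
    	 */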
3190	if (dev) {
3191		dev_name_source = dev->name;
3192		/* Terminate the table early */
3193		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3194		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3195	} else {
3196		struct neigh_table *tbl = p->tbl;
3197		dev_name_source = "default";
3198		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3199		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3200		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3201		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3202	}
3203
3204	if (handler) {
3205		/* RetransTime */
3206		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3207		/* ReachableTime */
3208		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3209		/* RetransTime (in milliseconds)*/
3210		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3211		/* ReachableTime (in milliseconds) */
3212		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3213	} else {
3214		/* These handlers update p->reachable_time after
3215		 * base_reachable_time(_ms) is set, so the change takes effect
3216		 * after the next neighbour update instead of waiting for
3217		 * neigh_periodic_work to recompute it (which can take several
3218		 * minutes).  Any handler that replaces them should do the same.
3219		 */
3220		/* ReachableTime */
3221		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3222			neigh_proc_base_reachable_time;
3223		/* ReachableTime (in milliseconds) */
3224		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3225			neigh_proc_base_reachable_time;
3226	}
3227
3228	/* Don't export sysctls to unprivileged users */
3229	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3230		t->neigh_vars[0].procname = NULL;
3231
3232	switch (neigh_parms_family(p)) {
3233	case AF_INET:
3234	      p_name = "ipv4";
3235	      break;
3236	case AF_INET6:
3237	      p_name = "ipv6";
3238	      break;
3239	default:
3240	      BUG();
3241	}
3242
3243	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3244		p_name, dev_name_source);
3245	t->sysctl_header =
3246		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3247	if (!t->sysctl_header)
3248		goto free;
3249
3250	p->sysctl_table = t;
3251	return 0;
3252
3253free:
3254	kfree(t);
3255err:
3256	return -ENOBUFS;
3257}
3258EXPORT_SYMBOL(neigh_sysctl_register);
3259
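    /* Tear down what neigh_sysctl_register() set up: detach the table from
     * @p, unregister the sysctl header and free the kmemdup()'d copy.
     */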
3260void neigh_sysctl_unregister(struct neigh_parms *p)
3261{
3262	if (p->sysctl_table) {
3263		struct neigh_sysctl_table *t = p->sysctl_table;
3264		p->sysctl_table = NULL;
3265		unregister_net_sysctl_table(t->sysctl_header);
3266		kfree(t);
3267	}
3268}
3269EXPORT_SYMBOL(neigh_sysctl_unregister);
3270
3271#endif	/* CONFIG_SYSCTL */
3272
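    /* Hook up the PF_UNSPEC neighbour rtnetlink operations (add/delete/dump
     * entries, dump/set tables); registered at subsys_initcall time so they
     * are available early.
     */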
3273static int __init neigh_init(void)
3274{
3275	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3276	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3277	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3278
3279	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3280		      0);
3281	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3282
3283	return 0;
3284}
3285
3286subsys_initcall(neigh_init);
3287