v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Generic address resolution entity
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	Fixes:
  10 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  11 *	Harald Welte		Add neighbour cache statistics like rtstat
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/slab.h>
  17#include <linux/kmemleak.h>
  18#include <linux/types.h>
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/socket.h>
  22#include <linux/netdevice.h>
  23#include <linux/proc_fs.h>
  24#ifdef CONFIG_SYSCTL
  25#include <linux/sysctl.h>
  26#endif
  27#include <linux/times.h>
  28#include <net/net_namespace.h>
  29#include <net/neighbour.h>
  30#include <net/arp.h>
  31#include <net/dst.h>
  32#include <net/sock.h>
  33#include <net/netevent.h>
  34#include <net/netlink.h>
  35#include <linux/rtnetlink.h>
  36#include <linux/random.h>
  37#include <linux/string.h>
  38#include <linux/log2.h>
  39#include <linux/inetdevice.h>
  40#include <net/addrconf.h>
  41
  42#include <trace/events/neigh.h>
  43
  44#define NEIGH_DEBUG 1
  45#define neigh_dbg(level, fmt, ...)		\
  46do {						\
  47	if (level <= NEIGH_DEBUG)		\
  48		pr_debug(fmt, ##__VA_ARGS__);	\
  49} while (0)
  50
  51#define PNEIGH_HASHMASK		0xF
  52
  53static void neigh_timer_handler(struct timer_list *t);
  54static void __neigh_notify(struct neighbour *n, int type, int flags,
  55			   u32 pid);
  56static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
  57static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
  58				    struct net_device *dev);
  59
  60#ifdef CONFIG_PROC_FS
  61static const struct seq_operations neigh_stat_seq_ops;
  62#endif
  63
  64/*
  65   Neighbour hash table buckets are protected with rwlock tbl->lock.
  66
  67   - All the scans/updates to hash buckets MUST be made under this lock.
  68   - NOTHING clever should be made under this lock: no callbacks
  69     to protocol backends, no attempts to send something to network.
  70     It will result in deadlocks, if backend/driver wants to use neighbour
  71     cache.
  72   - If the entry requires some non-trivial actions, increase
  73     its reference count and release table lock.
  74
  75   Neighbour entries are protected:
  76   - with reference count.
  77   - with rwlock neigh->lock
  78
  79   Reference count prevents destruction.
  80
  81   neigh->lock mainly serializes ll address data and its validity state.
  82   However, the same lock is used to protect another entry fields:
  83    - timer
  84    - resolution queue
  85
  86   Again, nothing clever shall be made under neigh->lock,
  87   the most complicated procedure, which we allow is dev->hard_header.
  88   It is supposed, that dev->hard_header is simplistic and does
  89   not make callbacks to neighbour tables.
  90 */
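/* Annotation (not part of the kernel source): a minimal sketch of the
 * "take a reference, then drop tbl->lock" rule described above.  The
 * helpers lookup_entry_locked() and do_slow_work() are hypothetical
 * placeholders for a bucket scan and for whatever non-trivial action
 * (transmit, callback into a protocol backend) must not run under the
 * table lock.
 *
 *	struct neighbour *n;
 *
 *	write_lock_bh(&tbl->lock);
 *	n = lookup_entry_locked(tbl);		// scan buckets under tbl->lock
 *	if (n)
 *		neigh_hold(n);			// refcount keeps the entry alive
 *	write_unlock_bh(&tbl->lock);		// drop the table lock first ...
 *	if (n) {
 *		do_slow_work(n);		// ... then do the slow part
 *		neigh_release(n);
 *	}
 */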
  91
  92static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  93{
  94	kfree_skb(skb);
  95	return -ENETDOWN;
  96}
  97
  98static void neigh_cleanup_and_release(struct neighbour *neigh)
  99{
 100	trace_neigh_cleanup_and_release(neigh, 0);
 101	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
 102	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 103	neigh_release(neigh);
 104}
 105
 106/*
 107 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 108 * It corresponds to default IPv6 settings and is not overridable,
 109 * because it is really reasonable choice.
 110 */
 111
 112unsigned long neigh_rand_reach_time(unsigned long base)
 113{
 114	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 115}
 116EXPORT_SYMBOL(neigh_rand_reach_time);
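/* Annotation (not part of the kernel source): a worked example of the
 * formula above.  With base = 30 * HZ, (prandom_u32() % base) falls in
 * [0, 30 * HZ) and (base >> 1) adds 15 * HZ, so the returned
 * reachable_time is uniform over [15 * HZ, 45 * HZ), i.e. the
 * (1/2)*base .. (3/2)*base interval the comment describes.
 */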
 117
 118static void neigh_mark_dead(struct neighbour *n)
 119{
 120	n->dead = 1;
 121	if (!list_empty(&n->gc_list)) {
 122		list_del_init(&n->gc_list);
 123		atomic_dec(&n->tbl->gc_entries);
 124	}
 125}
 126
 127static void neigh_update_gc_list(struct neighbour *n)
 128{
 129	bool on_gc_list, exempt_from_gc;
 130
 131	write_lock_bh(&n->tbl->lock);
 132	write_lock(&n->lock);
 133
 134	if (n->dead)
 135		goto out;
 136
 137	/* remove from the gc list if new state is permanent or if neighbor
 138	 * is externally learned; otherwise entry should be on the gc list
 139	 */
 140	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
 141			 n->flags & NTF_EXT_LEARNED;
 142	on_gc_list = !list_empty(&n->gc_list);
 143
 144	if (exempt_from_gc && on_gc_list) {
 145		list_del_init(&n->gc_list);
 146		atomic_dec(&n->tbl->gc_entries);
 147	} else if (!exempt_from_gc && !on_gc_list) {
 148		/* add entries to the tail; cleaning removes from the front */
 149		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 150		atomic_inc(&n->tbl->gc_entries);
 151	}
 152
 153out:
 154	write_unlock(&n->lock);
 155	write_unlock_bh(&n->tbl->lock);
 156}
 157
 158static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
 159				     int *notify)
 160{
 161	bool rc = false;
 162	u8 ndm_flags;
 163
 164	if (!(flags & NEIGH_UPDATE_F_ADMIN))
 165		return rc;
 166
 167	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
 168	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
 169		if (ndm_flags & NTF_EXT_LEARNED)
 170			neigh->flags |= NTF_EXT_LEARNED;
 171		else
 172			neigh->flags &= ~NTF_EXT_LEARNED;
 173		rc = true;
 174		*notify = 1;
 175	}
 176
 177	return rc;
 178}
 179
 180static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
 181		      struct neigh_table *tbl)
 182{
 183	bool retval = false;
 184
 185	write_lock(&n->lock);
 186	if (refcount_read(&n->refcnt) == 1) {
 187		struct neighbour *neigh;
 188
 189		neigh = rcu_dereference_protected(n->next,
 190						  lockdep_is_held(&tbl->lock));
 191		rcu_assign_pointer(*np, neigh);
 192		neigh_mark_dead(n);
 193		retval = true;
 194	}
 195	write_unlock(&n->lock);
 196	if (retval)
 197		neigh_cleanup_and_release(n);
 198	return retval;
 199}
 200
 201bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 202{
 203	struct neigh_hash_table *nht;
 204	void *pkey = ndel->primary_key;
 205	u32 hash_val;
 206	struct neighbour *n;
 207	struct neighbour __rcu **np;
 208
 209	nht = rcu_dereference_protected(tbl->nht,
 210					lockdep_is_held(&tbl->lock));
 211	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
 212	hash_val = hash_val >> (32 - nht->hash_shift);
 213
 214	np = &nht->hash_buckets[hash_val];
 215	while ((n = rcu_dereference_protected(*np,
 216					      lockdep_is_held(&tbl->lock)))) {
 217		if (n == ndel)
 218			return neigh_del(n, np, tbl);
 219		np = &n->next;
 220	}
 221	return false;
 222}
 223
 224static int neigh_forced_gc(struct neigh_table *tbl)
 225{
 226	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
 227	unsigned long tref = jiffies - 5 * HZ;
 228	struct neighbour *n, *tmp;
 229	int shrunk = 0;
 230
 231	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 232
 233	write_lock_bh(&tbl->lock);
 234
 235	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
 236		if (refcount_read(&n->refcnt) == 1) {
 237			bool remove = false;
 238
 239			write_lock(&n->lock);
 240			if ((n->nud_state == NUD_FAILED) ||
 241			    (n->nud_state == NUD_NOARP) ||
 242			    (tbl->is_multicast &&
 243			     tbl->is_multicast(n->primary_key)) ||
 244			    time_after(tref, n->updated))
 245				remove = true;
 246			write_unlock(&n->lock);
 247
 248			if (remove && neigh_remove_one(n, tbl))
 249				shrunk++;
 250			if (shrunk >= max_clean)
 251				break;
 252		}
 253	}
 254
 255	tbl->last_flush = jiffies;
 256
 257	write_unlock_bh(&tbl->lock);
 258
 259	return shrunk;
 260}
 261
 262static void neigh_add_timer(struct neighbour *n, unsigned long when)
 263{
 264	neigh_hold(n);
 265	if (unlikely(mod_timer(&n->timer, when))) {
 266		printk("NEIGH: BUG, double timer add, state is %x\n",
 267		       n->nud_state);
 268		dump_stack();
 269	}
 270}
 271
 272static int neigh_del_timer(struct neighbour *n)
 273{
 274	if ((n->nud_state & NUD_IN_TIMER) &&
 275	    del_timer(&n->timer)) {
 276		neigh_release(n);
 277		return 1;
 278	}
 279	return 0;
 280}
 281
 282static void pneigh_queue_purge(struct sk_buff_head *list)
 283{
 284	struct sk_buff *skb;
 285
 286	while ((skb = skb_dequeue(list)) != NULL) {
 287		dev_put(skb->dev);
 288		kfree_skb(skb);
 289	}
 290}
 291
 292static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
 293			    bool skip_perm)
 294{
 295	int i;
 296	struct neigh_hash_table *nht;
 297
 298	nht = rcu_dereference_protected(tbl->nht,
 299					lockdep_is_held(&tbl->lock));
 300
 301	for (i = 0; i < (1 << nht->hash_shift); i++) {
 302		struct neighbour *n;
 303		struct neighbour __rcu **np = &nht->hash_buckets[i];
 304
 305		while ((n = rcu_dereference_protected(*np,
 306					lockdep_is_held(&tbl->lock))) != NULL) {
 307			if (dev && n->dev != dev) {
 308				np = &n->next;
 309				continue;
 310			}
 311			if (skip_perm && n->nud_state & NUD_PERMANENT) {
 312				np = &n->next;
 313				continue;
 314			}
 315			rcu_assign_pointer(*np,
 316				   rcu_dereference_protected(n->next,
 317						lockdep_is_held(&tbl->lock)));
 318			write_lock(&n->lock);
 319			neigh_del_timer(n);
 320			neigh_mark_dead(n);
 321			if (refcount_read(&n->refcnt) != 1) {
 322				/* The most unpleasant situation.
 323				   We must destroy neighbour entry,
 324				   but someone still uses it.
 325
 326				   The destroy will be delayed until
 327				   the last user releases us, but
 328				   we must kill timers etc. and move
 329				   it to safe state.
 330				 */
 331				__skb_queue_purge(&n->arp_queue);
 332				n->arp_queue_len_bytes = 0;
 333				n->output = neigh_blackhole;
 334				if (n->nud_state & NUD_VALID)
 335					n->nud_state = NUD_NOARP;
 336				else
 337					n->nud_state = NUD_NONE;
 338				neigh_dbg(2, "neigh %p is stray\n", n);
 339			}
 340			write_unlock(&n->lock);
 341			neigh_cleanup_and_release(n);
 342		}
 343	}
 344}
 345
 346void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 347{
 348	write_lock_bh(&tbl->lock);
 349	neigh_flush_dev(tbl, dev, false);
 350	write_unlock_bh(&tbl->lock);
 351}
 352EXPORT_SYMBOL(neigh_changeaddr);
 353
 354static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 355			  bool skip_perm)
 356{
 357	write_lock_bh(&tbl->lock);
 358	neigh_flush_dev(tbl, dev, skip_perm);
 359	pneigh_ifdown_and_unlock(tbl, dev);
 360
 361	del_timer_sync(&tbl->proxy_timer);
 362	pneigh_queue_purge(&tbl->proxy_queue);
 363	return 0;
 364}
 365
 366int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
 367{
 368	__neigh_ifdown(tbl, dev, true);
 369	return 0;
 370}
 371EXPORT_SYMBOL(neigh_carrier_down);
 372
 373int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 374{
 375	__neigh_ifdown(tbl, dev, false);
 376	return 0;
 377}
 378EXPORT_SYMBOL(neigh_ifdown);
 379
 380static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 381				     struct net_device *dev,
 382				     bool exempt_from_gc)
 383{
 384	struct neighbour *n = NULL;
 385	unsigned long now = jiffies;
 386	int entries;
 387
 388	if (exempt_from_gc)
 389		goto do_alloc;
 390
 391	entries = atomic_inc_return(&tbl->gc_entries) - 1;
 392	if (entries >= tbl->gc_thresh3 ||
 393	    (entries >= tbl->gc_thresh2 &&
 394	     time_after(now, tbl->last_flush + 5 * HZ))) {
 395		if (!neigh_forced_gc(tbl) &&
 396		    entries >= tbl->gc_thresh3) {
 397			net_info_ratelimited("%s: neighbor table overflow!\n",
 398					     tbl->id);
 399			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 400			goto out_entries;
 401		}
 402	}
 403
 404do_alloc:
 405	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 406	if (!n)
 407		goto out_entries;
 408
 409	__skb_queue_head_init(&n->arp_queue);
 410	rwlock_init(&n->lock);
 411	seqlock_init(&n->ha_lock);
 412	n->updated	  = n->used = now;
 413	n->nud_state	  = NUD_NONE;
 414	n->output	  = neigh_blackhole;
 415	seqlock_init(&n->hh.hh_lock);
 416	n->parms	  = neigh_parms_clone(&tbl->parms);
 417	timer_setup(&n->timer, neigh_timer_handler, 0);
 418
 419	NEIGH_CACHE_STAT_INC(tbl, allocs);
 420	n->tbl		  = tbl;
 421	refcount_set(&n->refcnt, 1);
 422	n->dead		  = 1;
 423	INIT_LIST_HEAD(&n->gc_list);
 424
 425	atomic_inc(&tbl->entries);
 426out:
 427	return n;
 428
 429out_entries:
 430	if (!exempt_from_gc)
 431		atomic_dec(&tbl->gc_entries);
 432	goto out;
 433}
 434
 435static void neigh_get_hash_rnd(u32 *x)
 436{
 437	*x = get_random_u32() | 1;
 438}
 439
 440static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 441{
 442	size_t size = (1 << shift) * sizeof(struct neighbour *);
 443	struct neigh_hash_table *ret;
 444	struct neighbour __rcu **buckets;
 445	int i;
 446
 447	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 448	if (!ret)
 449		return NULL;
 450	if (size <= PAGE_SIZE) {
 451		buckets = kzalloc(size, GFP_ATOMIC);
 452	} else {
 453		buckets = (struct neighbour __rcu **)
 454			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 455					   get_order(size));
 456		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
 457	}
 458	if (!buckets) {
 459		kfree(ret);
 460		return NULL;
 461	}
 462	ret->hash_buckets = buckets;
 463	ret->hash_shift = shift;
 464	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 465		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 466	return ret;
 467}
 468
 469static void neigh_hash_free_rcu(struct rcu_head *head)
 470{
 471	struct neigh_hash_table *nht = container_of(head,
 472						    struct neigh_hash_table,
 473						    rcu);
 474	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 475	struct neighbour __rcu **buckets = nht->hash_buckets;
 476
 477	if (size <= PAGE_SIZE) {
 478		kfree(buckets);
 479	} else {
 480		kmemleak_free(buckets);
 481		free_pages((unsigned long)buckets, get_order(size));
 482	}
 483	kfree(nht);
 484}
 485
 486static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 487						unsigned long new_shift)
 488{
 489	unsigned int i, hash;
 490	struct neigh_hash_table *new_nht, *old_nht;
 491
 492	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 493
 494	old_nht = rcu_dereference_protected(tbl->nht,
 495					    lockdep_is_held(&tbl->lock));
 496	new_nht = neigh_hash_alloc(new_shift);
 497	if (!new_nht)
 498		return old_nht;
 499
 500	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 501		struct neighbour *n, *next;
 502
 503		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 504						   lockdep_is_held(&tbl->lock));
 505		     n != NULL;
 506		     n = next) {
 507			hash = tbl->hash(n->primary_key, n->dev,
 508					 new_nht->hash_rnd);
 509
 510			hash >>= (32 - new_nht->hash_shift);
 511			next = rcu_dereference_protected(n->next,
 512						lockdep_is_held(&tbl->lock));
 513
 514			rcu_assign_pointer(n->next,
 515					   rcu_dereference_protected(
 516						new_nht->hash_buckets[hash],
 517						lockdep_is_held(&tbl->lock)));
 518			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 519		}
 520	}
 521
 522	rcu_assign_pointer(tbl->nht, new_nht);
 523	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 524	return new_nht;
 525}
 526
 527struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 528			       struct net_device *dev)
 529{
 530	struct neighbour *n;
 531
 532	NEIGH_CACHE_STAT_INC(tbl, lookups);
 533
 534	rcu_read_lock_bh();
 535	n = __neigh_lookup_noref(tbl, pkey, dev);
 536	if (n) {
 537		if (!refcount_inc_not_zero(&n->refcnt))
 538			n = NULL;
 539		NEIGH_CACHE_STAT_INC(tbl, hits);
 540	}
 541
 542	rcu_read_unlock_bh();
 543	return n;
 544}
 545EXPORT_SYMBOL(neigh_lookup);
 546
 547struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 548				     const void *pkey)
 549{
 550	struct neighbour *n;
 551	unsigned int key_len = tbl->key_len;
 552	u32 hash_val;
 553	struct neigh_hash_table *nht;
 554
 555	NEIGH_CACHE_STAT_INC(tbl, lookups);
 556
 557	rcu_read_lock_bh();
 558	nht = rcu_dereference_bh(tbl->nht);
 559	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 560
 561	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 562	     n != NULL;
 563	     n = rcu_dereference_bh(n->next)) {
 564		if (!memcmp(n->primary_key, pkey, key_len) &&
 565		    net_eq(dev_net(n->dev), net)) {
 566			if (!refcount_inc_not_zero(&n->refcnt))
 567				n = NULL;
 568			NEIGH_CACHE_STAT_INC(tbl, hits);
 569			break;
 570		}
 571	}
 572
 573	rcu_read_unlock_bh();
 574	return n;
 575}
 576EXPORT_SYMBOL(neigh_lookup_nodev);
 577
 578static struct neighbour *___neigh_create(struct neigh_table *tbl,
 579					 const void *pkey,
 580					 struct net_device *dev,
 581					 bool exempt_from_gc, bool want_ref)
 582{
 583	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
 584	u32 hash_val;
 585	unsigned int key_len = tbl->key_len;
 586	int error;
 587	struct neigh_hash_table *nht;
 588
 589	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
 590
 591	if (!n) {
 592		rc = ERR_PTR(-ENOBUFS);
 593		goto out;
 594	}
 595
 596	memcpy(n->primary_key, pkey, key_len);
 597	n->dev = dev;
 598	dev_hold(dev);
 599
 600	/* Protocol specific setup. */
 601	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 602		rc = ERR_PTR(error);
 603		goto out_neigh_release;
 604	}
 605
 606	if (dev->netdev_ops->ndo_neigh_construct) {
 607		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 608		if (error < 0) {
 609			rc = ERR_PTR(error);
 610			goto out_neigh_release;
 611		}
 612	}
 613
 614	/* Device specific setup. */
 615	if (n->parms->neigh_setup &&
 616	    (error = n->parms->neigh_setup(n)) < 0) {
 617		rc = ERR_PTR(error);
 618		goto out_neigh_release;
 619	}
 620
 621	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 622
 623	write_lock_bh(&tbl->lock);
 624	nht = rcu_dereference_protected(tbl->nht,
 625					lockdep_is_held(&tbl->lock));
 626
 627	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 628		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 629
 630	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 631
 632	if (n->parms->dead) {
 633		rc = ERR_PTR(-EINVAL);
 634		goto out_tbl_unlock;
 635	}
 636
 637	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 638					    lockdep_is_held(&tbl->lock));
 639	     n1 != NULL;
 640	     n1 = rcu_dereference_protected(n1->next,
 641			lockdep_is_held(&tbl->lock))) {
 642		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 643			if (want_ref)
 644				neigh_hold(n1);
 645			rc = n1;
 646			goto out_tbl_unlock;
 647		}
 648	}
 649
 650	n->dead = 0;
 651	if (!exempt_from_gc)
 652		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 653
 654	if (want_ref)
 655		neigh_hold(n);
 656	rcu_assign_pointer(n->next,
 657			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 658						     lockdep_is_held(&tbl->lock)));
 659	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 660	write_unlock_bh(&tbl->lock);
 661	neigh_dbg(2, "neigh %p is created\n", n);
 662	rc = n;
 663out:
 664	return rc;
 665out_tbl_unlock:
 666	write_unlock_bh(&tbl->lock);
 667out_neigh_release:
 668	if (!exempt_from_gc)
 669		atomic_dec(&tbl->gc_entries);
 670	neigh_release(n);
 671	goto out;
 672}
 673
 674struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 675				 struct net_device *dev, bool want_ref)
 676{
 677	return ___neigh_create(tbl, pkey, dev, false, want_ref);
 678}
 679EXPORT_SYMBOL(__neigh_create);
 680
 681static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 682{
 683	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 684	hash_val ^= (hash_val >> 16);
 685	hash_val ^= hash_val >> 8;
 686	hash_val ^= hash_val >> 4;
 687	hash_val &= PNEIGH_HASHMASK;
 688	return hash_val;
 689}
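/* Annotation (not part of the kernel source): pneigh_hash() folds only the
 * last four bytes of the key.  For an IPv4 proxy entry (key_len == 4) that
 * is the whole address; the three xor/shift steps mix those 32 bits down
 * and the final mask leaves a bucket index in 0..PNEIGH_HASHMASK (0..15).
 */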
 690
 691static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 692					      struct net *net,
 693					      const void *pkey,
 694					      unsigned int key_len,
 695					      struct net_device *dev)
 696{
 697	while (n) {
 698		if (!memcmp(n->key, pkey, key_len) &&
 699		    net_eq(pneigh_net(n), net) &&
 700		    (n->dev == dev || !n->dev))
 701			return n;
 702		n = n->next;
 703	}
 704	return NULL;
 705}
 706
 707struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 708		struct net *net, const void *pkey, struct net_device *dev)
 709{
 710	unsigned int key_len = tbl->key_len;
 711	u32 hash_val = pneigh_hash(pkey, key_len);
 712
 713	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 714				 net, pkey, key_len, dev);
 715}
 716EXPORT_SYMBOL_GPL(__pneigh_lookup);
 717
 718struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 719				    struct net *net, const void *pkey,
 720				    struct net_device *dev, int creat)
 721{
 722	struct pneigh_entry *n;
 723	unsigned int key_len = tbl->key_len;
 724	u32 hash_val = pneigh_hash(pkey, key_len);
 725
 726	read_lock_bh(&tbl->lock);
 727	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 728			      net, pkey, key_len, dev);
 729	read_unlock_bh(&tbl->lock);
 730
 731	if (n || !creat)
 732		goto out;
 733
 734	ASSERT_RTNL();
 735
 736	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 737	if (!n)
 738		goto out;
 739
 740	n->protocol = 0;
 741	write_pnet(&n->net, net);
 742	memcpy(n->key, pkey, key_len);
 743	n->dev = dev;
 744	if (dev)
 745		dev_hold(dev);
 746
 747	if (tbl->pconstructor && tbl->pconstructor(n)) {
 748		if (dev)
 749			dev_put(dev);
 750		kfree(n);
 751		n = NULL;
 752		goto out;
 753	}
 754
 755	write_lock_bh(&tbl->lock);
 756	n->next = tbl->phash_buckets[hash_val];
 757	tbl->phash_buckets[hash_val] = n;
 758	write_unlock_bh(&tbl->lock);
 759out:
 760	return n;
 761}
 762EXPORT_SYMBOL(pneigh_lookup);
 763
 764
 765int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 766		  struct net_device *dev)
 767{
 768	struct pneigh_entry *n, **np;
 769	unsigned int key_len = tbl->key_len;
 770	u32 hash_val = pneigh_hash(pkey, key_len);
 771
 772	write_lock_bh(&tbl->lock);
 773	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 774	     np = &n->next) {
 775		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 776		    net_eq(pneigh_net(n), net)) {
 777			*np = n->next;
 778			write_unlock_bh(&tbl->lock);
 779			if (tbl->pdestructor)
 780				tbl->pdestructor(n);
 781			if (n->dev)
 782				dev_put(n->dev);
 783			kfree(n);
 784			return 0;
 785		}
 786	}
 787	write_unlock_bh(&tbl->lock);
 788	return -ENOENT;
 789}
 790
 791static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 792				    struct net_device *dev)
 793{
 794	struct pneigh_entry *n, **np, *freelist = NULL;
 795	u32 h;
 796
 797	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 798		np = &tbl->phash_buckets[h];
 799		while ((n = *np) != NULL) {
 800			if (!dev || n->dev == dev) {
 801				*np = n->next;
 802				n->next = freelist;
 803				freelist = n;
 804				continue;
 805			}
 806			np = &n->next;
 807		}
 808	}
 809	write_unlock_bh(&tbl->lock);
 810	while ((n = freelist)) {
 811		freelist = n->next;
 812		n->next = NULL;
 813		if (tbl->pdestructor)
 814			tbl->pdestructor(n);
 815		if (n->dev)
 816			dev_put(n->dev);
 817		kfree(n);
 818	}
 819	return -ENOENT;
 820}
 821
 822static void neigh_parms_destroy(struct neigh_parms *parms);
 823
 824static inline void neigh_parms_put(struct neigh_parms *parms)
 825{
 826	if (refcount_dec_and_test(&parms->refcnt))
 827		neigh_parms_destroy(parms);
 828}
 829
 830/*
 831 *	neighbour must already be out of the table;
 832 *
 833 */
 834void neigh_destroy(struct neighbour *neigh)
 835{
 836	struct net_device *dev = neigh->dev;
 837
 838	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 839
 840	if (!neigh->dead) {
 841		pr_warn("Destroying alive neighbour %p\n", neigh);
 842		dump_stack();
 843		return;
 844	}
 845
 846	if (neigh_del_timer(neigh))
 847		pr_warn("Impossible event\n");
 848
 849	write_lock_bh(&neigh->lock);
 850	__skb_queue_purge(&neigh->arp_queue);
 851	write_unlock_bh(&neigh->lock);
 852	neigh->arp_queue_len_bytes = 0;
 853
 854	if (dev->netdev_ops->ndo_neigh_destroy)
 855		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 856
 857	dev_put(dev);
 858	neigh_parms_put(neigh->parms);
 859
 860	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 861
 862	atomic_dec(&neigh->tbl->entries);
 863	kfree_rcu(neigh, rcu);
 864}
 865EXPORT_SYMBOL(neigh_destroy);
 866
 867/* Neighbour state is suspicious;
 868   disable fast path.
 869
 870   Called with write_locked neigh.
 871 */
 872static void neigh_suspect(struct neighbour *neigh)
 873{
 874	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 875
 876	neigh->output = neigh->ops->output;
 877}
 878
 879/* Neighbour state is OK;
 880   enable fast path.
 881
 882   Called with write_locked neigh.
 883 */
 884static void neigh_connect(struct neighbour *neigh)
 885{
 886	neigh_dbg(2, "neigh %p is connected\n", neigh);
 887
 888	neigh->output = neigh->ops->connected_output;
 889}
 890
 891static void neigh_periodic_work(struct work_struct *work)
 892{
 893	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 894	struct neighbour *n;
 895	struct neighbour __rcu **np;
 896	unsigned int i;
 897	struct neigh_hash_table *nht;
 898
 899	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 900
 901	write_lock_bh(&tbl->lock);
 902	nht = rcu_dereference_protected(tbl->nht,
 903					lockdep_is_held(&tbl->lock));
 904
 905	/*
 906	 *	periodically recompute ReachableTime from random function
 907	 */
 908
 909	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 910		struct neigh_parms *p;
 911		tbl->last_rand = jiffies;
 912		list_for_each_entry(p, &tbl->parms_list, list)
 913			p->reachable_time =
 914				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 915	}
 916
 917	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 918		goto out;
 919
 920	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 921		np = &nht->hash_buckets[i];
 922
 923		while ((n = rcu_dereference_protected(*np,
 924				lockdep_is_held(&tbl->lock))) != NULL) {
 925			unsigned int state;
 926
 927			write_lock(&n->lock);
 928
 929			state = n->nud_state;
 930			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
 931			    (n->flags & NTF_EXT_LEARNED)) {
 932				write_unlock(&n->lock);
 933				goto next_elt;
 934			}
 935
 936			if (time_before(n->used, n->confirmed))
 937				n->used = n->confirmed;
 938
 939			if (refcount_read(&n->refcnt) == 1 &&
 940			    (state == NUD_FAILED ||
 941			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 942				*np = n->next;
 943				neigh_mark_dead(n);
 944				write_unlock(&n->lock);
 945				neigh_cleanup_and_release(n);
 946				continue;
 947			}
 948			write_unlock(&n->lock);
 949
 950next_elt:
 951			np = &n->next;
 952		}
 953		/*
 954		 * It's fine to release lock here, even if hash table
 955		 * grows while we are preempted.
 956		 */
 957		write_unlock_bh(&tbl->lock);
 958		cond_resched();
 959		write_lock_bh(&tbl->lock);
 960		nht = rcu_dereference_protected(tbl->nht,
 961						lockdep_is_held(&tbl->lock));
 962	}
 963out:
 964	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 965	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 966	 * BASE_REACHABLE_TIME.
 967	 */
 968	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 969			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 970	write_unlock_bh(&tbl->lock);
 971}
 972
 973static __inline__ int neigh_max_probes(struct neighbour *n)
 974{
 975	struct neigh_parms *p = n->parms;
 976	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 977	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 978	        NEIGH_VAR(p, MCAST_PROBES));
 979}
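/* Annotation (not part of the kernel source), assuming the usual ARP
 * defaults (ucast_solicit = 3, app_solicit = 0, mcast_solicit = 3): an
 * entry that is not in NUD_PROBE may be solicited up to 3 + 0 + 3 = 6
 * times before the timer handler moves it to NUD_FAILED.
 */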
 980
 981static void neigh_invalidate(struct neighbour *neigh)
 982	__releases(neigh->lock)
 983	__acquires(neigh->lock)
 984{
 985	struct sk_buff *skb;
 986
 987	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 988	neigh_dbg(2, "neigh %p is failed\n", neigh);
 989	neigh->updated = jiffies;
 990
 991	/* It is very thin place. report_unreachable is very complicated
 992	   routine. Particularly, it can hit the same neighbour entry!
 993
 994	   So that, we try to be accurate and avoid dead loop. --ANK
 995	 */
 996	while (neigh->nud_state == NUD_FAILED &&
 997	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 998		write_unlock(&neigh->lock);
 999		neigh->ops->error_report(neigh, skb);
1000		write_lock(&neigh->lock);
1001	}
1002	__skb_queue_purge(&neigh->arp_queue);
1003	neigh->arp_queue_len_bytes = 0;
1004}
1005
1006static void neigh_probe(struct neighbour *neigh)
1007	__releases(neigh->lock)
1008{
1009	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1010	/* keep skb alive even if arp_queue overflows */
1011	if (skb)
1012		skb = skb_clone(skb, GFP_ATOMIC);
1013	write_unlock(&neigh->lock);
1014	if (neigh->ops->solicit)
1015		neigh->ops->solicit(neigh, skb);
1016	atomic_inc(&neigh->probes);
1017	consume_skb(skb);
1018}
1019
1020/* Called when a timer expires for a neighbour entry. */
1021
1022static void neigh_timer_handler(struct timer_list *t)
1023{
1024	unsigned long now, next;
1025	struct neighbour *neigh = from_timer(neigh, t, timer);
1026	unsigned int state;
1027	int notify = 0;
1028
1029	write_lock(&neigh->lock);
1030
1031	state = neigh->nud_state;
1032	now = jiffies;
1033	next = now + HZ;
1034
1035	if (!(state & NUD_IN_TIMER))
1036		goto out;
1037
1038	if (state & NUD_REACHABLE) {
1039		if (time_before_eq(now,
1040				   neigh->confirmed + neigh->parms->reachable_time)) {
1041			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1042			next = neigh->confirmed + neigh->parms->reachable_time;
1043		} else if (time_before_eq(now,
1044					  neigh->used +
1045					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1046			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1047			neigh->nud_state = NUD_DELAY;
1048			neigh->updated = jiffies;
1049			neigh_suspect(neigh);
1050			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1051		} else {
1052			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1053			neigh->nud_state = NUD_STALE;
1054			neigh->updated = jiffies;
1055			neigh_suspect(neigh);
1056			notify = 1;
1057		}
1058	} else if (state & NUD_DELAY) {
1059		if (time_before_eq(now,
1060				   neigh->confirmed +
1061				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1062			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1063			neigh->nud_state = NUD_REACHABLE;
1064			neigh->updated = jiffies;
1065			neigh_connect(neigh);
1066			notify = 1;
1067			next = neigh->confirmed + neigh->parms->reachable_time;
1068		} else {
1069			neigh_dbg(2, "neigh %p is probed\n", neigh);
1070			neigh->nud_state = NUD_PROBE;
1071			neigh->updated = jiffies;
1072			atomic_set(&neigh->probes, 0);
1073			notify = 1;
1074			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1075					 HZ/100);
1076		}
1077	} else {
1078		/* NUD_PROBE|NUD_INCOMPLETE */
1079		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1080	}
1081
1082	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1083	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1084		neigh->nud_state = NUD_FAILED;
1085		notify = 1;
1086		neigh_invalidate(neigh);
1087		goto out;
1088	}
1089
1090	if (neigh->nud_state & NUD_IN_TIMER) {
1091		if (time_before(next, jiffies + HZ/100))
1092			next = jiffies + HZ/100;
1093		if (!mod_timer(&neigh->timer, next))
1094			neigh_hold(neigh);
1095	}
1096	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1097		neigh_probe(neigh);
1098	} else {
1099out:
1100		write_unlock(&neigh->lock);
1101	}
1102
1103	if (notify)
1104		neigh_update_notify(neigh, 0);
1105
1106	trace_neigh_timer_handler(neigh, 0);
1107
1108	neigh_release(neigh);
1109}
1110
1111int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1112{
1113	int rc;
1114	bool immediate_probe = false;
1115
1116	write_lock_bh(&neigh->lock);
1117
1118	rc = 0;
1119	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1120		goto out_unlock_bh;
1121	if (neigh->dead)
1122		goto out_dead;
1123
1124	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1125		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1126		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1127			unsigned long next, now = jiffies;
1128
1129			atomic_set(&neigh->probes,
1130				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1131			neigh_del_timer(neigh);
1132			neigh->nud_state     = NUD_INCOMPLETE;
1133			neigh->updated = now;
1134			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1135					 HZ/100);
1136			neigh_add_timer(neigh, next);
1137			immediate_probe = true;
1138		} else {
1139			neigh->nud_state = NUD_FAILED;
1140			neigh->updated = jiffies;
1141			write_unlock_bh(&neigh->lock);
1142
1143			kfree_skb(skb);
1144			return 1;
1145		}
1146	} else if (neigh->nud_state & NUD_STALE) {
1147		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1148		neigh_del_timer(neigh);
1149		neigh->nud_state = NUD_DELAY;
1150		neigh->updated = jiffies;
1151		neigh_add_timer(neigh, jiffies +
1152				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1153	}
1154
1155	if (neigh->nud_state == NUD_INCOMPLETE) {
1156		if (skb) {
1157			while (neigh->arp_queue_len_bytes + skb->truesize >
1158			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1159				struct sk_buff *buff;
1160
1161				buff = __skb_dequeue(&neigh->arp_queue);
1162				if (!buff)
1163					break;
1164				neigh->arp_queue_len_bytes -= buff->truesize;
1165				kfree_skb(buff);
1166				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1167			}
1168			skb_dst_force(skb);
1169			__skb_queue_tail(&neigh->arp_queue, skb);
1170			neigh->arp_queue_len_bytes += skb->truesize;
1171		}
1172		rc = 1;
1173	}
1174out_unlock_bh:
1175	if (immediate_probe)
1176		neigh_probe(neigh);
1177	else
1178		write_unlock(&neigh->lock);
1179	local_bh_enable();
1180	trace_neigh_event_send_done(neigh, rc);
1181	return rc;
1182
1183out_dead:
1184	if (neigh->nud_state & NUD_STALE)
1185		goto out_unlock_bh;
1186	write_unlock_bh(&neigh->lock);
1187	kfree_skb(skb);
1188	trace_neigh_event_send_dead(neigh, 1);
1189	return 1;
1190}
1191EXPORT_SYMBOL(__neigh_event_send);
1192
1193static void neigh_update_hhs(struct neighbour *neigh)
1194{
1195	struct hh_cache *hh;
1196	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1197		= NULL;
1198
1199	if (neigh->dev->header_ops)
1200		update = neigh->dev->header_ops->cache_update;
1201
1202	if (update) {
1203		hh = &neigh->hh;
1204		if (READ_ONCE(hh->hh_len)) {
1205			write_seqlock_bh(&hh->hh_lock);
1206			update(hh, neigh->dev, neigh->ha);
1207			write_sequnlock_bh(&hh->hh_lock);
1208		}
1209	}
1210}
1211
1212
1213
1214/* Generic update routine.
1215   -- lladdr is new lladdr or NULL, if it is not supplied.
1216   -- new    is new state.
1217   -- flags
1218	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1219				if it is different.
1220	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1221				lladdr instead of overriding it
1222				if it is different.
1223	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1224
1225	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1226				NTF_ROUTER flag.
1227	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1228				a router.
1229
1230   Caller MUST hold reference count on the entry.
1231 */
1232
1233static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1234			  u8 new, u32 flags, u32 nlmsg_pid,
1235			  struct netlink_ext_ack *extack)
1236{
1237	bool ext_learn_change = false;
1238	u8 old;
1239	int err;
1240	int notify = 0;
1241	struct net_device *dev;
1242	int update_isrouter = 0;
1243
1244	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1245
1246	write_lock_bh(&neigh->lock);
1247
1248	dev    = neigh->dev;
1249	old    = neigh->nud_state;
1250	err    = -EPERM;
1251
1252	if (neigh->dead) {
1253		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1254		new = old;
1255		goto out;
1256	}
1257	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1258	    (old & (NUD_NOARP | NUD_PERMANENT)))
1259		goto out;
1260
1261	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
1262
1263	if (!(new & NUD_VALID)) {
1264		neigh_del_timer(neigh);
1265		if (old & NUD_CONNECTED)
1266			neigh_suspect(neigh);
1267		neigh->nud_state = new;
1268		err = 0;
1269		notify = old & NUD_VALID;
1270		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1271		    (new & NUD_FAILED)) {
1272			neigh_invalidate(neigh);
1273			notify = 1;
1274		}
1275		goto out;
1276	}
1277
1278	/* Compare new lladdr with cached one */
1279	if (!dev->addr_len) {
1280		/* First case: device needs no address. */
1281		lladdr = neigh->ha;
1282	} else if (lladdr) {
1283		/* The second case: if something is already cached
1284		   and a new address is proposed:
1285		   - compare new & old
1286		   - if they are different, check override flag
1287		 */
1288		if ((old & NUD_VALID) &&
1289		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1290			lladdr = neigh->ha;
1291	} else {
1292		/* No address is supplied; if we know something,
1293		   use it, otherwise discard the request.
1294		 */
1295		err = -EINVAL;
1296		if (!(old & NUD_VALID)) {
1297			NL_SET_ERR_MSG(extack, "No link layer address given");
1298			goto out;
1299		}
1300		lladdr = neigh->ha;
1301	}
1302
1303	/* Update confirmed timestamp for neighbour entry after we
1304	 * received ARP packet even if it doesn't change IP to MAC binding.
1305	 */
1306	if (new & NUD_CONNECTED)
1307		neigh->confirmed = jiffies;
1308
1309	/* If entry was valid and address is not changed,
1310	   do not change entry state, if new one is STALE.
1311	 */
1312	err = 0;
1313	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1314	if (old & NUD_VALID) {
1315		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1316			update_isrouter = 0;
1317			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1318			    (old & NUD_CONNECTED)) {
1319				lladdr = neigh->ha;
1320				new = NUD_STALE;
1321			} else
1322				goto out;
1323		} else {
1324			if (lladdr == neigh->ha && new == NUD_STALE &&
1325			    !(flags & NEIGH_UPDATE_F_ADMIN))
1326				new = old;
1327		}
1328	}
1329
1330	/* Update timestamp only once we know we will make a change to the
1331	 * neighbour entry. Otherwise we risk to move the locktime window with
1332	 * noop updates and ignore relevant ARP updates.
1333	 */
1334	if (new != old || lladdr != neigh->ha)
1335		neigh->updated = jiffies;
1336
1337	if (new != old) {
1338		neigh_del_timer(neigh);
1339		if (new & NUD_PROBE)
1340			atomic_set(&neigh->probes, 0);
1341		if (new & NUD_IN_TIMER)
1342			neigh_add_timer(neigh, (jiffies +
1343						((new & NUD_REACHABLE) ?
1344						 neigh->parms->reachable_time :
1345						 0)));
1346		neigh->nud_state = new;
1347		notify = 1;
1348	}
1349
1350	if (lladdr != neigh->ha) {
1351		write_seqlock(&neigh->ha_lock);
1352		memcpy(&neigh->ha, lladdr, dev->addr_len);
1353		write_sequnlock(&neigh->ha_lock);
1354		neigh_update_hhs(neigh);
1355		if (!(new & NUD_CONNECTED))
1356			neigh->confirmed = jiffies -
1357				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1358		notify = 1;
1359	}
1360	if (new == old)
1361		goto out;
1362	if (new & NUD_CONNECTED)
1363		neigh_connect(neigh);
1364	else
1365		neigh_suspect(neigh);
1366	if (!(old & NUD_VALID)) {
1367		struct sk_buff *skb;
1368
1369		/* Again: avoid dead loop if something went wrong */
1370
1371		while (neigh->nud_state & NUD_VALID &&
1372		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1373			struct dst_entry *dst = skb_dst(skb);
1374			struct neighbour *n2, *n1 = neigh;
1375			write_unlock_bh(&neigh->lock);
1376
1377			rcu_read_lock();
1378
1379			/* Why not just use 'neigh' as-is?  The problem is that
1380			 * things such as shaper, eql, and sch_teql can end up
1381			 * using alternative, different, neigh objects to output
1382			 * the packet in the output path.  So what we need to do
1383			 * here is re-lookup the top-level neigh in the path so
1384			 * we can reinject the packet there.
1385			 */
1386			n2 = NULL;
1387			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1388				n2 = dst_neigh_lookup_skb(dst, skb);
1389				if (n2)
1390					n1 = n2;
1391			}
1392			n1->output(n1, skb);
1393			if (n2)
1394				neigh_release(n2);
1395			rcu_read_unlock();
1396
1397			write_lock_bh(&neigh->lock);
1398		}
1399		__skb_queue_purge(&neigh->arp_queue);
1400		neigh->arp_queue_len_bytes = 0;
1401	}
1402out:
1403	if (update_isrouter)
1404		neigh_update_is_router(neigh, flags, &notify);
1405	write_unlock_bh(&neigh->lock);
1406
1407	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1408		neigh_update_gc_list(neigh);
1409
1410	if (notify)
1411		neigh_update_notify(neigh, nlmsg_pid);
1412
1413	trace_neigh_update_done(neigh, err);
1414
1415	return err;
1416}
1417
1418int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1419		 u32 flags, u32 nlmsg_pid)
1420{
1421	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1422}
1423EXPORT_SYMBOL(neigh_update);
1424
1425/* Update the neigh to listen temporarily for probe responses, even if it is
1426 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1427 */
1428void __neigh_set_probe_once(struct neighbour *neigh)
1429{
1430	if (neigh->dead)
1431		return;
1432	neigh->updated = jiffies;
1433	if (!(neigh->nud_state & NUD_FAILED))
1434		return;
1435	neigh->nud_state = NUD_INCOMPLETE;
1436	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1437	neigh_add_timer(neigh,
1438			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1439				      HZ/100));
1440}
1441EXPORT_SYMBOL(__neigh_set_probe_once);
1442
1443struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1444				 u8 *lladdr, void *saddr,
1445				 struct net_device *dev)
1446{
1447	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1448						 lladdr || !dev->addr_len);
1449	if (neigh)
1450		neigh_update(neigh, lladdr, NUD_STALE,
1451			     NEIGH_UPDATE_F_OVERRIDE, 0);
1452	return neigh;
1453}
1454EXPORT_SYMBOL(neigh_event_ns);
1455
1456/* called with read_lock_bh(&n->lock); */
1457static void neigh_hh_init(struct neighbour *n)
1458{
1459	struct net_device *dev = n->dev;
1460	__be16 prot = n->tbl->protocol;
1461	struct hh_cache	*hh = &n->hh;
1462
1463	write_lock_bh(&n->lock);
1464
1465	/* Only one thread can come in here and initialize the
1466	 * hh_cache entry.
1467	 */
1468	if (!hh->hh_len)
1469		dev->header_ops->cache(n, hh, prot);
1470
1471	write_unlock_bh(&n->lock);
1472}
1473
1474/* Slow and careful. */
1475
1476int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1477{
1478	int rc = 0;
1479
1480	if (!neigh_event_send(neigh, skb)) {
1481		int err;
1482		struct net_device *dev = neigh->dev;
1483		unsigned int seq;
1484
1485		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1486			neigh_hh_init(neigh);
1487
1488		do {
1489			__skb_pull(skb, skb_network_offset(skb));
1490			seq = read_seqbegin(&neigh->ha_lock);
1491			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1492					      neigh->ha, NULL, skb->len);
1493		} while (read_seqretry(&neigh->ha_lock, seq));
1494
1495		if (err >= 0)
1496			rc = dev_queue_xmit(skb);
1497		else
1498			goto out_kfree_skb;
1499	}
1500out:
1501	return rc;
1502out_kfree_skb:
1503	rc = -EINVAL;
1504	kfree_skb(skb);
1505	goto out;
1506}
1507EXPORT_SYMBOL(neigh_resolve_output);
1508
1509/* As fast as possible without hh cache */
1510
1511int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1512{
1513	struct net_device *dev = neigh->dev;
1514	unsigned int seq;
1515	int err;
1516
1517	do {
1518		__skb_pull(skb, skb_network_offset(skb));
1519		seq = read_seqbegin(&neigh->ha_lock);
1520		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1521				      neigh->ha, NULL, skb->len);
1522	} while (read_seqretry(&neigh->ha_lock, seq));
1523
1524	if (err >= 0)
1525		err = dev_queue_xmit(skb);
1526	else {
1527		err = -EINVAL;
1528		kfree_skb(skb);
1529	}
1530	return err;
1531}
1532EXPORT_SYMBOL(neigh_connected_output);
1533
1534int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1535{
1536	return dev_queue_xmit(skb);
1537}
1538EXPORT_SYMBOL(neigh_direct_output);
1539
1540static void neigh_proxy_process(struct timer_list *t)
1541{
1542	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1543	long sched_next = 0;
1544	unsigned long now = jiffies;
1545	struct sk_buff *skb, *n;
1546
1547	spin_lock(&tbl->proxy_queue.lock);
1548
1549	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1550		long tdif = NEIGH_CB(skb)->sched_next - now;
1551
1552		if (tdif <= 0) {
1553			struct net_device *dev = skb->dev;
1554
1555			__skb_unlink(skb, &tbl->proxy_queue);
1556			if (tbl->proxy_redo && netif_running(dev)) {
1557				rcu_read_lock();
1558				tbl->proxy_redo(skb);
1559				rcu_read_unlock();
1560			} else {
1561				kfree_skb(skb);
1562			}
1563
1564			dev_put(dev);
1565		} else if (!sched_next || tdif < sched_next)
1566			sched_next = tdif;
1567	}
1568	del_timer(&tbl->proxy_timer);
1569	if (sched_next)
1570		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1571	spin_unlock(&tbl->proxy_queue.lock);
1572}
1573
1574void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1575		    struct sk_buff *skb)
1576{
1577	unsigned long sched_next = jiffies +
1578			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
1579
1580	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1581		kfree_skb(skb);
1582		return;
1583	}
1584
1585	NEIGH_CB(skb)->sched_next = sched_next;
1586	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1587
1588	spin_lock(&tbl->proxy_queue.lock);
1589	if (del_timer(&tbl->proxy_timer)) {
1590		if (time_before(tbl->proxy_timer.expires, sched_next))
1591			sched_next = tbl->proxy_timer.expires;
1592	}
1593	skb_dst_drop(skb);
1594	dev_hold(skb->dev);
1595	__skb_queue_tail(&tbl->proxy_queue, skb);
1596	mod_timer(&tbl->proxy_timer, sched_next);
1597	spin_unlock(&tbl->proxy_queue.lock);
1598}
1599EXPORT_SYMBOL(pneigh_enqueue);
1600
1601static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1602						      struct net *net, int ifindex)
1603{
1604	struct neigh_parms *p;
1605
1606	list_for_each_entry(p, &tbl->parms_list, list) {
1607		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1608		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1609			return p;
1610	}
1611
1612	return NULL;
1613}
1614
1615struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1616				      struct neigh_table *tbl)
1617{
1618	struct neigh_parms *p;
1619	struct net *net = dev_net(dev);
1620	const struct net_device_ops *ops = dev->netdev_ops;
1621
1622	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1623	if (p) {
1624		p->tbl		  = tbl;
1625		refcount_set(&p->refcnt, 1);
1626		p->reachable_time =
1627				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1628		dev_hold(dev);
1629		p->dev = dev;
1630		write_pnet(&p->net, net);
1631		p->sysctl_table = NULL;
1632
1633		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1634			dev_put(dev);
1635			kfree(p);
1636			return NULL;
1637		}
1638
1639		write_lock_bh(&tbl->lock);
1640		list_add(&p->list, &tbl->parms.list);
1641		write_unlock_bh(&tbl->lock);
1642
1643		neigh_parms_data_state_cleanall(p);
1644	}
1645	return p;
1646}
1647EXPORT_SYMBOL(neigh_parms_alloc);
1648
1649static void neigh_rcu_free_parms(struct rcu_head *head)
1650{
1651	struct neigh_parms *parms =
1652		container_of(head, struct neigh_parms, rcu_head);
1653
1654	neigh_parms_put(parms);
1655}
1656
1657void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1658{
1659	if (!parms || parms == &tbl->parms)
1660		return;
1661	write_lock_bh(&tbl->lock);
1662	list_del(&parms->list);
1663	parms->dead = 1;
1664	write_unlock_bh(&tbl->lock);
1665	if (parms->dev)
1666		dev_put(parms->dev);
1667	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1668}
1669EXPORT_SYMBOL(neigh_parms_release);
1670
1671static void neigh_parms_destroy(struct neigh_parms *parms)
1672{
1673	kfree(parms);
1674}
1675
1676static struct lock_class_key neigh_table_proxy_queue_class;
1677
1678static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1679
1680void neigh_table_init(int index, struct neigh_table *tbl)
1681{
1682	unsigned long now = jiffies;
1683	unsigned long phsize;
1684
1685	INIT_LIST_HEAD(&tbl->parms_list);
1686	INIT_LIST_HEAD(&tbl->gc_list);
1687	list_add(&tbl->parms.list, &tbl->parms_list);
1688	write_pnet(&tbl->parms.net, &init_net);
1689	refcount_set(&tbl->parms.refcnt, 1);
1690	tbl->parms.reachable_time =
1691			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1692
1693	tbl->stats = alloc_percpu(struct neigh_statistics);
1694	if (!tbl->stats)
1695		panic("cannot create neighbour cache statistics");
1696
1697#ifdef CONFIG_PROC_FS
1698	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1699			      &neigh_stat_seq_ops, tbl))
1700		panic("cannot create neighbour proc dir entry");
1701#endif
1702
1703	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1704
1705	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1706	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1707
1708	if (!tbl->nht || !tbl->phash_buckets)
1709		panic("cannot allocate neighbour cache hashes");
1710
1711	if (!tbl->entry_size)
1712		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1713					tbl->key_len, NEIGH_PRIV_ALIGN);
1714	else
1715		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1716
1717	rwlock_init(&tbl->lock);
1718	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1719	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1720			tbl->parms.reachable_time);
1721	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1722	skb_queue_head_init_class(&tbl->proxy_queue,
1723			&neigh_table_proxy_queue_class);
1724
1725	tbl->last_flush = now;
1726	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1727
1728	neigh_tables[index] = tbl;
1729}
1730EXPORT_SYMBOL(neigh_table_init);
1731
1732int neigh_table_clear(int index, struct neigh_table *tbl)
1733{
1734	neigh_tables[index] = NULL;
1735	/* It is not clean... Fix it to unload IPv6 module safely */
1736	cancel_delayed_work_sync(&tbl->gc_work);
1737	del_timer_sync(&tbl->proxy_timer);
1738	pneigh_queue_purge(&tbl->proxy_queue);
1739	neigh_ifdown(tbl, NULL);
1740	if (atomic_read(&tbl->entries))
1741		pr_crit("neighbour leakage\n");
1742
1743	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1744		 neigh_hash_free_rcu);
1745	tbl->nht = NULL;
1746
1747	kfree(tbl->phash_buckets);
1748	tbl->phash_buckets = NULL;
1749
1750	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1751
1752	free_percpu(tbl->stats);
1753	tbl->stats = NULL;
1754
1755	return 0;
1756}
1757EXPORT_SYMBOL(neigh_table_clear);
1758
1759static struct neigh_table *neigh_find_table(int family)
1760{
1761	struct neigh_table *tbl = NULL;
1762
1763	switch (family) {
1764	case AF_INET:
1765		tbl = neigh_tables[NEIGH_ARP_TABLE];
1766		break;
1767	case AF_INET6:
1768		tbl = neigh_tables[NEIGH_ND_TABLE];
1769		break;
1770	case AF_DECnet:
1771		tbl = neigh_tables[NEIGH_DN_TABLE];
1772		break;
1773	}
1774
1775	return tbl;
1776}
1777
1778const struct nla_policy nda_policy[NDA_MAX+1] = {
1779	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1780	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1781	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1782	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1783	[NDA_PROBES]		= { .type = NLA_U32 },
1784	[NDA_VLAN]		= { .type = NLA_U16 },
1785	[NDA_PORT]		= { .type = NLA_U16 },
1786	[NDA_VNI]		= { .type = NLA_U32 },
1787	[NDA_IFINDEX]		= { .type = NLA_U32 },
1788	[NDA_MASTER]		= { .type = NLA_U32 },
1789	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1790	[NDA_NH_ID]		= { .type = NLA_U32 },
1791	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1792};
1793
1794static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1795			struct netlink_ext_ack *extack)
1796{
1797	struct net *net = sock_net(skb->sk);
1798	struct ndmsg *ndm;
1799	struct nlattr *dst_attr;
1800	struct neigh_table *tbl;
1801	struct neighbour *neigh;
1802	struct net_device *dev = NULL;
1803	int err = -EINVAL;
1804
1805	ASSERT_RTNL();
1806	if (nlmsg_len(nlh) < sizeof(*ndm))
1807		goto out;
1808
1809	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1810	if (!dst_attr) {
1811		NL_SET_ERR_MSG(extack, "Network address not specified");
1812		goto out;
1813	}
1814
1815	ndm = nlmsg_data(nlh);
1816	if (ndm->ndm_ifindex) {
1817		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1818		if (dev == NULL) {
1819			err = -ENODEV;
1820			goto out;
1821		}
1822	}
1823
1824	tbl = neigh_find_table(ndm->ndm_family);
1825	if (tbl == NULL)
1826		return -EAFNOSUPPORT;
1827
1828	if (nla_len(dst_attr) < (int)tbl->key_len) {
1829		NL_SET_ERR_MSG(extack, "Invalid network address");
1830		goto out;
1831	}
1832
1833	if (ndm->ndm_flags & NTF_PROXY) {
1834		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1835		goto out;
1836	}
1837
1838	if (dev == NULL)
1839		goto out;
1840
1841	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1842	if (neigh == NULL) {
1843		err = -ENOENT;
1844		goto out;
1845	}
1846
1847	err = __neigh_update(neigh, NULL, NUD_FAILED,
1848			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1849			     NETLINK_CB(skb).portid, extack);
1850	write_lock_bh(&tbl->lock);
1851	neigh_release(neigh);
1852	neigh_remove_one(neigh, tbl);
1853	write_unlock_bh(&tbl->lock);
1854
1855out:
1856	return err;
1857}
1858
1859static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1860		     struct netlink_ext_ack *extack)
1861{
1862	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1863		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1864	struct net *net = sock_net(skb->sk);
1865	struct ndmsg *ndm;
1866	struct nlattr *tb[NDA_MAX+1];
1867	struct neigh_table *tbl;
1868	struct net_device *dev = NULL;
1869	struct neighbour *neigh;
1870	void *dst, *lladdr;
1871	u8 protocol = 0;
1872	int err;
1873
1874	ASSERT_RTNL();
1875	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1876				     nda_policy, extack);
1877	if (err < 0)
1878		goto out;
1879
1880	err = -EINVAL;
1881	if (!tb[NDA_DST]) {
1882		NL_SET_ERR_MSG(extack, "Network address not specified");
1883		goto out;
1884	}
1885
1886	ndm = nlmsg_data(nlh);
1887	if (ndm->ndm_ifindex) {
1888		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1889		if (dev == NULL) {
1890			err = -ENODEV;
1891			goto out;
1892		}
1893
1894		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1895			NL_SET_ERR_MSG(extack, "Invalid link address");
1896			goto out;
1897		}
1898	}
1899
1900	tbl = neigh_find_table(ndm->ndm_family);
1901	if (tbl == NULL)
1902		return -EAFNOSUPPORT;
1903
1904	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1905		NL_SET_ERR_MSG(extack, "Invalid network address");
1906		goto out;
1907	}
1908
1909	dst = nla_data(tb[NDA_DST]);
1910	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1911
1912	if (tb[NDA_PROTOCOL])
1913		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1914
1915	if (ndm->ndm_flags & NTF_PROXY) {
1916		struct pneigh_entry *pn;
1917
1918		err = -ENOBUFS;
1919		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1920		if (pn) {
1921			pn->flags = ndm->ndm_flags;
1922			if (protocol)
1923				pn->protocol = protocol;
1924			err = 0;
1925		}
1926		goto out;
1927	}
1928
1929	if (!dev) {
1930		NL_SET_ERR_MSG(extack, "Device not specified");
1931		goto out;
1932	}
1933
1934	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1935		err = -EINVAL;
1936		goto out;
1937	}
1938
1939	neigh = neigh_lookup(tbl, dst, dev);
1940	if (neigh == NULL) {
1941		bool exempt_from_gc;
1942
1943		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1944			err = -ENOENT;
1945			goto out;
1946		}
1947
1948		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1949				 ndm->ndm_flags & NTF_EXT_LEARNED;
1950		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
1951		if (IS_ERR(neigh)) {
1952			err = PTR_ERR(neigh);
1953			goto out;
1954		}
1955	} else {
1956		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1957			err = -EEXIST;
1958			neigh_release(neigh);
1959			goto out;
1960		}
1961
1962		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1963			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1964				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1965	}
1966
1967	if (protocol)
1968		neigh->protocol = protocol;
1969
1970	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1971		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1972
1973	if (ndm->ndm_flags & NTF_ROUTER)
1974		flags |= NEIGH_UPDATE_F_ISROUTER;
1975
1976	if (ndm->ndm_flags & NTF_USE) {
1977		neigh_event_send(neigh, NULL);
1978		err = 0;
1979	} else
1980		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1981				     NETLINK_CB(skb).portid, extack);
1982
1983	neigh_release(neigh);
1984
1985out:
1986	return err;
1987}
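/* Illustrative note: "ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55
 * dev eth0 nud permanent" arrives here as RTM_NEWNEIGH with
 * NLM_F_CREATE | NLM_F_REPLACE set. NUD_PERMANENT makes exempt_from_gc
 * true above, so such an entry is never reclaimed by the garbage
 * collector. Address, MAC and device name are examples only.
 */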
1988
1989static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1990{
1991	struct nlattr *nest;
1992
1993	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
1994	if (nest == NULL)
1995		return -ENOBUFS;
1996
1997	if ((parms->dev &&
1998	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1999	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2000	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2001			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2002	    /* approximative value for deprecated QUEUE_LEN (in packets) */
2003	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2004			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2005	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2006	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2007	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2008			NEIGH_VAR(parms, UCAST_PROBES)) ||
2009	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2010			NEIGH_VAR(parms, MCAST_PROBES)) ||
2011	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2012			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2013	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2014			  NDTPA_PAD) ||
2015	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2016			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2017	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2018			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2019	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2020			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2021	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2022			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2023	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2024			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2025	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2026			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2027	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2028			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2029		goto nla_put_failure;
2030	return nla_nest_end(skb, nest);
2031
2032nla_put_failure:
2033	nla_nest_cancel(skb, nest);
2034	return -EMSGSIZE;
2035}
2036
2037static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2038			      u32 pid, u32 seq, int type, int flags)
2039{
2040	struct nlmsghdr *nlh;
2041	struct ndtmsg *ndtmsg;
2042
2043	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2044	if (nlh == NULL)
2045		return -EMSGSIZE;
2046
2047	ndtmsg = nlmsg_data(nlh);
2048
2049	read_lock_bh(&tbl->lock);
2050	ndtmsg->ndtm_family = tbl->family;
2051	ndtmsg->ndtm_pad1   = 0;
2052	ndtmsg->ndtm_pad2   = 0;
2053
2054	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2055	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2056	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2057	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2058	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2059		goto nla_put_failure;
2060	{
2061		unsigned long now = jiffies;
2062		long flush_delta = now - tbl->last_flush;
2063		long rand_delta = now - tbl->last_rand;
2064		struct neigh_hash_table *nht;
2065		struct ndt_config ndc = {
2066			.ndtc_key_len		= tbl->key_len,
2067			.ndtc_entry_size	= tbl->entry_size,
2068			.ndtc_entries		= atomic_read(&tbl->entries),
2069			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2070			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2071			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
2072		};
2073
2074		rcu_read_lock_bh();
2075		nht = rcu_dereference_bh(tbl->nht);
2076		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2077		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2078		rcu_read_unlock_bh();
2079
2080		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2081			goto nla_put_failure;
2082	}
2083
2084	{
2085		int cpu;
2086		struct ndt_stats ndst;
2087
2088		memset(&ndst, 0, sizeof(ndst));
2089
2090		for_each_possible_cpu(cpu) {
2091			struct neigh_statistics	*st;
2092
2093			st = per_cpu_ptr(tbl->stats, cpu);
2094			ndst.ndts_allocs		+= st->allocs;
2095			ndst.ndts_destroys		+= st->destroys;
2096			ndst.ndts_hash_grows		+= st->hash_grows;
2097			ndst.ndts_res_failed		+= st->res_failed;
2098			ndst.ndts_lookups		+= st->lookups;
2099			ndst.ndts_hits			+= st->hits;
2100			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
2101			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
2102			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
2103			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
2104			ndst.ndts_table_fulls		+= st->table_fulls;
2105		}
2106
2107		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2108				  NDTA_PAD))
2109			goto nla_put_failure;
2110	}
2111
2112	BUG_ON(tbl->parms.dev);
2113	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2114		goto nla_put_failure;
2115
2116	read_unlock_bh(&tbl->lock);
2117	nlmsg_end(skb, nlh);
2118	return 0;
2119
2120nla_put_failure:
2121	read_unlock_bh(&tbl->lock);
2122	nlmsg_cancel(skb, nlh);
2123	return -EMSGSIZE;
2124}
2125
2126static int neightbl_fill_param_info(struct sk_buff *skb,
2127				    struct neigh_table *tbl,
2128				    struct neigh_parms *parms,
2129				    u32 pid, u32 seq, int type,
2130				    unsigned int flags)
2131{
2132	struct ndtmsg *ndtmsg;
2133	struct nlmsghdr *nlh;
2134
2135	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2136	if (nlh == NULL)
2137		return -EMSGSIZE;
2138
2139	ndtmsg = nlmsg_data(nlh);
2140
2141	read_lock_bh(&tbl->lock);
2142	ndtmsg->ndtm_family = tbl->family;
2143	ndtmsg->ndtm_pad1   = 0;
2144	ndtmsg->ndtm_pad2   = 0;
2145
2146	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2147	    neightbl_fill_parms(skb, parms) < 0)
2148		goto errout;
2149
2150	read_unlock_bh(&tbl->lock);
2151	nlmsg_end(skb, nlh);
2152	return 0;
2153errout:
2154	read_unlock_bh(&tbl->lock);
2155	nlmsg_cancel(skb, nlh);
2156	return -EMSGSIZE;
2157}
2158
2159static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2160	[NDTA_NAME]		= { .type = NLA_STRING },
2161	[NDTA_THRESH1]		= { .type = NLA_U32 },
2162	[NDTA_THRESH2]		= { .type = NLA_U32 },
2163	[NDTA_THRESH3]		= { .type = NLA_U32 },
2164	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2165	[NDTA_PARMS]		= { .type = NLA_NESTED },
2166};
2167
2168static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2169	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2170	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2171	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2172	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2173	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2174	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2175	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2176	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2177	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2178	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2179	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2180	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2181	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2182	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2183};
2184
2185static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2186			struct netlink_ext_ack *extack)
2187{
2188	struct net *net = sock_net(skb->sk);
2189	struct neigh_table *tbl;
2190	struct ndtmsg *ndtmsg;
2191	struct nlattr *tb[NDTA_MAX+1];
2192	bool found = false;
2193	int err, tidx;
2194
2195	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2196				     nl_neightbl_policy, extack);
2197	if (err < 0)
2198		goto errout;
2199
2200	if (tb[NDTA_NAME] == NULL) {
2201		err = -EINVAL;
2202		goto errout;
2203	}
2204
2205	ndtmsg = nlmsg_data(nlh);
2206
2207	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2208		tbl = neigh_tables[tidx];
2209		if (!tbl)
2210			continue;
2211		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2212			continue;
2213		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2214			found = true;
2215			break;
2216		}
2217	}
2218
2219	if (!found)
2220		return -ENOENT;
2221
2222	/*
2223	 * We acquire tbl->lock to be nice to the periodic timers and
2224	 * make sure they always see a consistent set of values.
2225	 */
2226	write_lock_bh(&tbl->lock);
2227
2228	if (tb[NDTA_PARMS]) {
2229		struct nlattr *tbp[NDTPA_MAX+1];
2230		struct neigh_parms *p;
2231		int i, ifindex = 0;
2232
2233		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2234						  tb[NDTA_PARMS],
2235						  nl_ntbl_parm_policy, extack);
2236		if (err < 0)
2237			goto errout_tbl_lock;
2238
2239		if (tbp[NDTPA_IFINDEX])
2240			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2241
2242		p = lookup_neigh_parms(tbl, net, ifindex);
2243		if (p == NULL) {
2244			err = -ENOENT;
2245			goto errout_tbl_lock;
2246		}
2247
2248		for (i = 1; i <= NDTPA_MAX; i++) {
2249			if (tbp[i] == NULL)
2250				continue;
2251
2252			switch (i) {
2253			case NDTPA_QUEUE_LEN:
2254				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2255					      nla_get_u32(tbp[i]) *
2256					      SKB_TRUESIZE(ETH_FRAME_LEN));
2257				break;
2258			case NDTPA_QUEUE_LENBYTES:
2259				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2260					      nla_get_u32(tbp[i]));
2261				break;
2262			case NDTPA_PROXY_QLEN:
2263				NEIGH_VAR_SET(p, PROXY_QLEN,
2264					      nla_get_u32(tbp[i]));
2265				break;
2266			case NDTPA_APP_PROBES:
2267				NEIGH_VAR_SET(p, APP_PROBES,
2268					      nla_get_u32(tbp[i]));
2269				break;
2270			case NDTPA_UCAST_PROBES:
2271				NEIGH_VAR_SET(p, UCAST_PROBES,
2272					      nla_get_u32(tbp[i]));
2273				break;
2274			case NDTPA_MCAST_PROBES:
2275				NEIGH_VAR_SET(p, MCAST_PROBES,
2276					      nla_get_u32(tbp[i]));
2277				break;
2278			case NDTPA_MCAST_REPROBES:
2279				NEIGH_VAR_SET(p, MCAST_REPROBES,
2280					      nla_get_u32(tbp[i]));
2281				break;
2282			case NDTPA_BASE_REACHABLE_TIME:
2283				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2284					      nla_get_msecs(tbp[i]));
2285				/* update reachable_time as well, otherwise, the change will
2286				 * only be effective after the next time neigh_periodic_work
2287				 * decides to recompute it (can be multiple minutes)
2288				 */
2289				p->reachable_time =
2290					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2291				break;
2292			case NDTPA_GC_STALETIME:
2293				NEIGH_VAR_SET(p, GC_STALETIME,
2294					      nla_get_msecs(tbp[i]));
2295				break;
2296			case NDTPA_DELAY_PROBE_TIME:
2297				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2298					      nla_get_msecs(tbp[i]));
2299				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2300				break;
2301			case NDTPA_RETRANS_TIME:
2302				NEIGH_VAR_SET(p, RETRANS_TIME,
2303					      nla_get_msecs(tbp[i]));
2304				break;
2305			case NDTPA_ANYCAST_DELAY:
2306				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2307					      nla_get_msecs(tbp[i]));
2308				break;
2309			case NDTPA_PROXY_DELAY:
2310				NEIGH_VAR_SET(p, PROXY_DELAY,
2311					      nla_get_msecs(tbp[i]));
2312				break;
2313			case NDTPA_LOCKTIME:
2314				NEIGH_VAR_SET(p, LOCKTIME,
2315					      nla_get_msecs(tbp[i]));
2316				break;
2317			}
2318		}
2319	}
2320
2321	err = -ENOENT;
2322	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2323	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2324	    !net_eq(net, &init_net))
2325		goto errout_tbl_lock;
2326
2327	if (tb[NDTA_THRESH1])
2328		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2329
2330	if (tb[NDTA_THRESH2])
2331		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2332
2333	if (tb[NDTA_THRESH3])
2334		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2335
2336	if (tb[NDTA_GC_INTERVAL])
2337		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2338
2339	err = 0;
2340
2341errout_tbl_lock:
2342	write_unlock_bh(&tbl->lock);
2343errout:
2344	return err;
2345}
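/* Illustrative note: "ip ntable change name arp_cache thresh1 1024"
 * (recent iproute2) is delivered as RTM_SETNEIGHTBL and handled here;
 * per-device parameters are selected via the nested NDTPA_IFINDEX
 * attribute, and the gc_thresh1..3 and gc_interval fields are only
 * writable from the initial network namespace, as checked above.
 */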
2346
2347static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2348				    struct netlink_ext_ack *extack)
2349{
2350	struct ndtmsg *ndtm;
2351
2352	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2353		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2354		return -EINVAL;
2355	}
2356
2357	ndtm = nlmsg_data(nlh);
2358	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2359		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2360		return -EINVAL;
2361	}
2362
2363	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2364		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2365		return -EINVAL;
2366	}
2367
2368	return 0;
2369}
2370
2371static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2372{
2373	const struct nlmsghdr *nlh = cb->nlh;
2374	struct net *net = sock_net(skb->sk);
2375	int family, tidx, nidx = 0;
2376	int tbl_skip = cb->args[0];
2377	int neigh_skip = cb->args[1];
2378	struct neigh_table *tbl;
2379
2380	if (cb->strict_check) {
2381		int err = neightbl_valid_dump_info(nlh, cb->extack);
2382
2383		if (err < 0)
2384			return err;
2385	}
2386
2387	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2388
2389	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2390		struct neigh_parms *p;
2391
2392		tbl = neigh_tables[tidx];
2393		if (!tbl)
2394			continue;
2395
2396		if (tidx < tbl_skip || (family && tbl->family != family))
2397			continue;
2398
2399		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2400				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2401				       NLM_F_MULTI) < 0)
2402			break;
2403
2404		nidx = 0;
2405		p = list_next_entry(&tbl->parms, list);
2406		list_for_each_entry_from(p, &tbl->parms_list, list) {
2407			if (!net_eq(neigh_parms_net(p), net))
2408				continue;
2409
2410			if (nidx < neigh_skip)
2411				goto next;
2412
2413			if (neightbl_fill_param_info(skb, tbl, p,
2414						     NETLINK_CB(cb->skb).portid,
2415						     nlh->nlmsg_seq,
2416						     RTM_NEWNEIGHTBL,
2417						     NLM_F_MULTI) < 0)
2418				goto out;
2419		next:
2420			nidx++;
2421		}
2422
2423		neigh_skip = 0;
2424	}
2425out:
2426	cb->args[0] = tidx;
2427	cb->args[1] = nidx;
2428
2429	return skb->len;
2430}
2431
2432static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2433			   u32 pid, u32 seq, int type, unsigned int flags)
2434{
2435	unsigned long now = jiffies;
2436	struct nda_cacheinfo ci;
2437	struct nlmsghdr *nlh;
2438	struct ndmsg *ndm;
2439
2440	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2441	if (nlh == NULL)
2442		return -EMSGSIZE;
2443
2444	ndm = nlmsg_data(nlh);
2445	ndm->ndm_family	 = neigh->ops->family;
2446	ndm->ndm_pad1    = 0;
2447	ndm->ndm_pad2    = 0;
2448	ndm->ndm_flags	 = neigh->flags;
2449	ndm->ndm_type	 = neigh->type;
2450	ndm->ndm_ifindex = neigh->dev->ifindex;
2451
2452	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2453		goto nla_put_failure;
2454
2455	read_lock_bh(&neigh->lock);
2456	ndm->ndm_state	 = neigh->nud_state;
2457	if (neigh->nud_state & NUD_VALID) {
2458		char haddr[MAX_ADDR_LEN];
2459
2460		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2461		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2462			read_unlock_bh(&neigh->lock);
2463			goto nla_put_failure;
2464		}
2465	}
2466
2467	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2468	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2469	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2470	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2471	read_unlock_bh(&neigh->lock);
2472
2473	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2474	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2475		goto nla_put_failure;
2476
2477	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2478		goto nla_put_failure;
2479
2480	nlmsg_end(skb, nlh);
2481	return 0;
2482
2483nla_put_failure:
2484	nlmsg_cancel(skb, nlh);
2485	return -EMSGSIZE;
2486}
2487
2488static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2489			    u32 pid, u32 seq, int type, unsigned int flags,
2490			    struct neigh_table *tbl)
2491{
2492	struct nlmsghdr *nlh;
2493	struct ndmsg *ndm;
2494
2495	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2496	if (nlh == NULL)
2497		return -EMSGSIZE;
2498
2499	ndm = nlmsg_data(nlh);
2500	ndm->ndm_family	 = tbl->family;
2501	ndm->ndm_pad1    = 0;
2502	ndm->ndm_pad2    = 0;
2503	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2504	ndm->ndm_type	 = RTN_UNICAST;
2505	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2506	ndm->ndm_state	 = NUD_NONE;
2507
2508	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2509		goto nla_put_failure;
2510
2511	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2512		goto nla_put_failure;
2513
2514	nlmsg_end(skb, nlh);
2515	return 0;
2516
2517nla_put_failure:
2518	nlmsg_cancel(skb, nlh);
2519	return -EMSGSIZE;
2520}
2521
2522static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2523{
2524	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2525	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2526}
2527
2528static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2529{
2530	struct net_device *master;
2531
2532	if (!master_idx)
2533		return false;
2534
2535	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2536	if (!master || master->ifindex != master_idx)
2537		return true;
2538
2539	return false;
2540}
2541
2542static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2543{
2544	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2545		return true;
2546
2547	return false;
2548}
2549
2550struct neigh_dump_filter {
2551	int master_idx;
2552	int dev_idx;
2553};
2554
2555static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2556			    struct netlink_callback *cb,
2557			    struct neigh_dump_filter *filter)
2558{
2559	struct net *net = sock_net(skb->sk);
2560	struct neighbour *n;
2561	int rc, h, s_h = cb->args[1];
2562	int idx, s_idx = idx = cb->args[2];
2563	struct neigh_hash_table *nht;
2564	unsigned int flags = NLM_F_MULTI;
2565
2566	if (filter->dev_idx || filter->master_idx)
2567		flags |= NLM_F_DUMP_FILTERED;
2568
2569	rcu_read_lock_bh();
2570	nht = rcu_dereference_bh(tbl->nht);
2571
2572	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2573		if (h > s_h)
2574			s_idx = 0;
2575		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2576		     n != NULL;
2577		     n = rcu_dereference_bh(n->next)) {
2578			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2579				goto next;
2580			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2581			    neigh_master_filtered(n->dev, filter->master_idx))
2582				goto next;
2583			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2584					    cb->nlh->nlmsg_seq,
2585					    RTM_NEWNEIGH,
2586					    flags) < 0) {
2587				rc = -1;
2588				goto out;
2589			}
2590next:
2591			idx++;
2592		}
2593	}
2594	rc = skb->len;
2595out:
2596	rcu_read_unlock_bh();
2597	cb->args[1] = h;
2598	cb->args[2] = idx;
2599	return rc;
2600}
2601
2602static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2603			     struct netlink_callback *cb,
2604			     struct neigh_dump_filter *filter)
2605{
2606	struct pneigh_entry *n;
2607	struct net *net = sock_net(skb->sk);
2608	int rc, h, s_h = cb->args[3];
2609	int idx, s_idx = idx = cb->args[4];
2610	unsigned int flags = NLM_F_MULTI;
2611
2612	if (filter->dev_idx || filter->master_idx)
2613		flags |= NLM_F_DUMP_FILTERED;
2614
2615	read_lock_bh(&tbl->lock);
2616
2617	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2618		if (h > s_h)
2619			s_idx = 0;
2620		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2621			if (idx < s_idx || pneigh_net(n) != net)
2622				goto next;
2623			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2624			    neigh_master_filtered(n->dev, filter->master_idx))
2625				goto next;
2626			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2627					    cb->nlh->nlmsg_seq,
2628					    RTM_NEWNEIGH, flags, tbl) < 0) {
2629				read_unlock_bh(&tbl->lock);
2630				rc = -1;
2631				goto out;
2632			}
2633		next:
2634			idx++;
2635		}
2636	}
2637
2638	read_unlock_bh(&tbl->lock);
2639	rc = skb->len;
2640out:
2641	cb->args[3] = h;
2642	cb->args[4] = idx;
2643	return rc;
2644
2645}
2646
2647static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2648				bool strict_check,
2649				struct neigh_dump_filter *filter,
2650				struct netlink_ext_ack *extack)
2651{
2652	struct nlattr *tb[NDA_MAX + 1];
2653	int err, i;
2654
2655	if (strict_check) {
2656		struct ndmsg *ndm;
2657
2658		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2659			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2660			return -EINVAL;
2661		}
2662
2663		ndm = nlmsg_data(nlh);
2664		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2665		    ndm->ndm_state || ndm->ndm_type) {
2666			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2667			return -EINVAL;
2668		}
2669
2670		if (ndm->ndm_flags & ~NTF_PROXY) {
2671			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2672			return -EINVAL;
2673		}
2674
2675		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2676						    tb, NDA_MAX, nda_policy,
2677						    extack);
2678	} else {
2679		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2680					     NDA_MAX, nda_policy, extack);
2681	}
2682	if (err < 0)
2683		return err;
2684
2685	for (i = 0; i <= NDA_MAX; ++i) {
2686		if (!tb[i])
2687			continue;
2688
2689		/* all new attributes should require strict_check */
2690		switch (i) {
2691		case NDA_IFINDEX:
2692			filter->dev_idx = nla_get_u32(tb[i]);
2693			break;
2694		case NDA_MASTER:
2695			filter->master_idx = nla_get_u32(tb[i]);
2696			break;
2697		default:
2698			if (strict_check) {
2699				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2700				return -EINVAL;
2701			}
2702		}
2703	}
2704
2705	return 0;
2706}
2707
2708static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2709{
2710	const struct nlmsghdr *nlh = cb->nlh;
2711	struct neigh_dump_filter filter = {};
2712	struct neigh_table *tbl;
2713	int t, family, s_t;
2714	int proxy = 0;
2715	int err;
2716
2717	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2718
2719	/* check for full ndmsg structure presence, family member is
2720	 * the same for both structures
2721	 */
2722	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2723	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2724		proxy = 1;
2725
2726	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2727	if (err < 0 && cb->strict_check)
2728		return err;
2729
2730	s_t = cb->args[0];
2731
2732	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2733		tbl = neigh_tables[t];
2734
2735		if (!tbl)
2736			continue;
2737		if (t < s_t || (family && tbl->family != family))
2738			continue;
2739		if (t > s_t)
2740			memset(&cb->args[1], 0, sizeof(cb->args) -
2741						sizeof(cb->args[0]));
2742		if (proxy)
2743			err = pneigh_dump_table(tbl, skb, cb, &filter);
2744		else
2745			err = neigh_dump_table(tbl, skb, cb, &filter);
2746		if (err < 0)
2747			break;
2748	}
2749
2750	cb->args[0] = t;
2751	return skb->len;
2752}
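/* Illustrative note: a dump request carrying NDA_IFINDEX or NDA_MASTER
 * (as sent by e.g. "ip neigh show dev eth0" or "ip neigh show master br0"
 * with a recent iproute2) is filtered by the helpers above, while
 * "ip neigh show proxy" sets NTF_PROXY so the pneigh table is walked
 * instead. Device names are examples only.
 */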
2753
2754static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2755			       struct neigh_table **tbl,
2756			       void **dst, int *dev_idx, u8 *ndm_flags,
2757			       struct netlink_ext_ack *extack)
2758{
2759	struct nlattr *tb[NDA_MAX + 1];
2760	struct ndmsg *ndm;
2761	int err, i;
2762
2763	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2764		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2765		return -EINVAL;
2766	}
2767
2768	ndm = nlmsg_data(nlh);
2769	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2770	    ndm->ndm_type) {
2771		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2772		return -EINVAL;
2773	}
2774
2775	if (ndm->ndm_flags & ~NTF_PROXY) {
2776		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2777		return -EINVAL;
2778	}
2779
2780	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2781					    NDA_MAX, nda_policy, extack);
2782	if (err < 0)
2783		return err;
2784
2785	*ndm_flags = ndm->ndm_flags;
2786	*dev_idx = ndm->ndm_ifindex;
2787	*tbl = neigh_find_table(ndm->ndm_family);
2788	if (*tbl == NULL) {
2789		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2790		return -EAFNOSUPPORT;
2791	}
2792
2793	for (i = 0; i <= NDA_MAX; ++i) {
2794		if (!tb[i])
2795			continue;
2796
2797		switch (i) {
2798		case NDA_DST:
2799			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2800				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2801				return -EINVAL;
2802			}
2803			*dst = nla_data(tb[i]);
2804			break;
2805		default:
2806			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2807			return -EINVAL;
2808		}
2809	}
2810
2811	return 0;
2812}
2813
2814static inline size_t neigh_nlmsg_size(void)
2815{
2816	return NLMSG_ALIGN(sizeof(struct ndmsg))
2817	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2818	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2819	       + nla_total_size(sizeof(struct nda_cacheinfo))
2820	       + nla_total_size(4)  /* NDA_PROBES */
2821	       + nla_total_size(1); /* NDA_PROTOCOL */
2822}
2823
2824static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2825			   u32 pid, u32 seq)
2826{
2827	struct sk_buff *skb;
2828	int err = 0;
2829
2830	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2831	if (!skb)
2832		return -ENOBUFS;
2833
2834	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2835	if (err) {
2836		kfree_skb(skb);
2837		goto errout;
2838	}
2839
2840	err = rtnl_unicast(skb, net, pid);
2841errout:
2842	return err;
2843}
2844
2845static inline size_t pneigh_nlmsg_size(void)
2846{
2847	return NLMSG_ALIGN(sizeof(struct ndmsg))
2848	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2849	       + nla_total_size(1); /* NDA_PROTOCOL */
2850}
2851
2852static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2853			    u32 pid, u32 seq, struct neigh_table *tbl)
2854{
2855	struct sk_buff *skb;
2856	int err = 0;
2857
2858	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2859	if (!skb)
2860		return -ENOBUFS;
2861
2862	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2863	if (err) {
2864		kfree_skb(skb);
2865		goto errout;
2866	}
2867
2868	err = rtnl_unicast(skb, net, pid);
2869errout:
2870	return err;
2871}
2872
2873static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2874		     struct netlink_ext_ack *extack)
2875{
2876	struct net *net = sock_net(in_skb->sk);
2877	struct net_device *dev = NULL;
2878	struct neigh_table *tbl = NULL;
2879	struct neighbour *neigh;
2880	void *dst = NULL;
2881	u8 ndm_flags = 0;
2882	int dev_idx = 0;
2883	int err;
2884
2885	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2886				  extack);
2887	if (err < 0)
2888		return err;
2889
2890	if (dev_idx) {
2891		dev = __dev_get_by_index(net, dev_idx);
2892		if (!dev) {
2893			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2894			return -ENODEV;
2895		}
2896	}
2897
2898	if (!dst) {
2899		NL_SET_ERR_MSG(extack, "Network address not specified");
2900		return -EINVAL;
2901	}
2902
2903	if (ndm_flags & NTF_PROXY) {
2904		struct pneigh_entry *pn;
2905
2906		pn = pneigh_lookup(tbl, net, dst, dev, 0);
2907		if (!pn) {
2908			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2909			return -ENOENT;
2910		}
2911		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2912					nlh->nlmsg_seq, tbl);
2913	}
2914
2915	if (!dev) {
2916		NL_SET_ERR_MSG(extack, "No device specified");
2917		return -EINVAL;
2918	}
2919
2920	neigh = neigh_lookup(tbl, dst, dev);
2921	if (!neigh) {
2922		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2923		return -ENOENT;
2924	}
2925
2926	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2927			      nlh->nlmsg_seq);
2928
2929	neigh_release(neigh);
2930
2931	return err;
2932}
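/* Illustrative note: "ip neigh get 192.0.2.1 dev eth0" (with an iproute2
 * new enough to support single-entry requests) sends RTM_GETNEIGH and is
 * answered here with one unicast RTM_NEWNEIGH reply; where supported,
 * adding the "proxy" keyword sets NTF_PROXY and queries the pneigh table
 * instead.
 */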
2933
2934void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2935{
2936	int chain;
2937	struct neigh_hash_table *nht;
2938
2939	rcu_read_lock_bh();
2940	nht = rcu_dereference_bh(tbl->nht);
2941
2942	read_lock(&tbl->lock); /* avoid resizes */
2943	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2944		struct neighbour *n;
2945
2946		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2947		     n != NULL;
2948		     n = rcu_dereference_bh(n->next))
2949			cb(n, cookie);
2950	}
2951	read_unlock(&tbl->lock);
2952	rcu_read_unlock_bh();
2953}
2954EXPORT_SYMBOL(neigh_for_each);
2955
2956/* The tbl->lock must be held as a writer and BH disabled. */
2957void __neigh_for_each_release(struct neigh_table *tbl,
2958			      int (*cb)(struct neighbour *))
2959{
2960	int chain;
2961	struct neigh_hash_table *nht;
2962
2963	nht = rcu_dereference_protected(tbl->nht,
2964					lockdep_is_held(&tbl->lock));
2965	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2966		struct neighbour *n;
2967		struct neighbour __rcu **np;
2968
2969		np = &nht->hash_buckets[chain];
2970		while ((n = rcu_dereference_protected(*np,
2971					lockdep_is_held(&tbl->lock))) != NULL) {
2972			int release;
2973
2974			write_lock(&n->lock);
2975			release = cb(n);
2976			if (release) {
2977				rcu_assign_pointer(*np,
2978					rcu_dereference_protected(n->next,
2979						lockdep_is_held(&tbl->lock)));
2980				neigh_mark_dead(n);
2981			} else
2982				np = &n->next;
2983			write_unlock(&n->lock);
2984			if (release)
2985				neigh_cleanup_and_release(n);
2986		}
2987	}
2988}
2989EXPORT_SYMBOL(__neigh_for_each_release);
2990
2991int neigh_xmit(int index, struct net_device *dev,
2992	       const void *addr, struct sk_buff *skb)
2993{
2994	int err = -EAFNOSUPPORT;
2995	if (likely(index < NEIGH_NR_TABLES)) {
2996		struct neigh_table *tbl;
2997		struct neighbour *neigh;
2998
2999		tbl = neigh_tables[index];
3000		if (!tbl)
3001			goto out;
3002		rcu_read_lock_bh();
3003		if (index == NEIGH_ARP_TABLE) {
3004			u32 key = *((u32 *)addr);
3005
3006			neigh = __ipv4_neigh_lookup_noref(dev, key);
3007		} else {
3008			neigh = __neigh_lookup_noref(tbl, addr, dev);
3009		}
3010		if (!neigh)
3011			neigh = __neigh_create(tbl, addr, dev, false);
3012		err = PTR_ERR(neigh);
3013		if (IS_ERR(neigh)) {
3014			rcu_read_unlock_bh();
3015			goto out_kfree_skb;
3016		}
3017		err = neigh->output(neigh, skb);
3018		rcu_read_unlock_bh();
3019	}
3020	else if (index == NEIGH_LINK_TABLE) {
3021		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3022				      addr, NULL, skb->len);
3023		if (err < 0)
3024			goto out_kfree_skb;
3025		err = dev_queue_xmit(skb);
3026	}
3027out:
3028	return err;
3029out_kfree_skb:
3030	kfree_skb(skb);
3031	goto out;
3032}
3033EXPORT_SYMBOL(neigh_xmit);
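/* Illustrative note: callers that pick their own next hop, such as the
 * MPLS forwarding path, use neigh_xmit(NEIGH_ARP_TABLE, dev, &gw, skb)
 * to resolve an IPv4 gateway (gw being a 32-bit address in this sketch),
 * or NEIGH_LINK_TABLE to emit a frame to a fixed link-layer address with
 * no neighbour resolution at all.
 */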
3034
3035#ifdef CONFIG_PROC_FS
3036
3037static struct neighbour *neigh_get_first(struct seq_file *seq)
3038{
3039	struct neigh_seq_state *state = seq->private;
3040	struct net *net = seq_file_net(seq);
3041	struct neigh_hash_table *nht = state->nht;
3042	struct neighbour *n = NULL;
3043	int bucket;
3044
3045	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3046	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3047		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3048
3049		while (n) {
3050			if (!net_eq(dev_net(n->dev), net))
3051				goto next;
3052			if (state->neigh_sub_iter) {
3053				loff_t fakep = 0;
3054				void *v;
3055
3056				v = state->neigh_sub_iter(state, n, &fakep);
3057				if (!v)
3058					goto next;
3059			}
3060			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3061				break;
3062			if (n->nud_state & ~NUD_NOARP)
3063				break;
3064next:
3065			n = rcu_dereference_bh(n->next);
3066		}
3067
3068		if (n)
3069			break;
3070	}
3071	state->bucket = bucket;
3072
3073	return n;
3074}
3075
3076static struct neighbour *neigh_get_next(struct seq_file *seq,
3077					struct neighbour *n,
3078					loff_t *pos)
3079{
3080	struct neigh_seq_state *state = seq->private;
3081	struct net *net = seq_file_net(seq);
3082	struct neigh_hash_table *nht = state->nht;
3083
3084	if (state->neigh_sub_iter) {
3085		void *v = state->neigh_sub_iter(state, n, pos);
3086		if (v)
3087			return n;
3088	}
3089	n = rcu_dereference_bh(n->next);
3090
3091	while (1) {
3092		while (n) {
3093			if (!net_eq(dev_net(n->dev), net))
3094				goto next;
3095			if (state->neigh_sub_iter) {
3096				void *v = state->neigh_sub_iter(state, n, pos);
3097				if (v)
3098					return n;
3099				goto next;
3100			}
3101			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3102				break;
3103
3104			if (n->nud_state & ~NUD_NOARP)
3105				break;
3106next:
3107			n = rcu_dereference_bh(n->next);
3108		}
3109
3110		if (n)
3111			break;
3112
3113		if (++state->bucket >= (1 << nht->hash_shift))
3114			break;
3115
3116		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3117	}
3118
3119	if (n && pos)
3120		--(*pos);
3121	return n;
3122}
3123
3124static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3125{
3126	struct neighbour *n = neigh_get_first(seq);
3127
3128	if (n) {
3129		--(*pos);
3130		while (*pos) {
3131			n = neigh_get_next(seq, n, pos);
3132			if (!n)
3133				break;
3134		}
3135	}
3136	return *pos ? NULL : n;
3137}
3138
3139static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3140{
3141	struct neigh_seq_state *state = seq->private;
3142	struct net *net = seq_file_net(seq);
3143	struct neigh_table *tbl = state->tbl;
3144	struct pneigh_entry *pn = NULL;
3145	int bucket;
3146
3147	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3148	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3149		pn = tbl->phash_buckets[bucket];
3150		while (pn && !net_eq(pneigh_net(pn), net))
3151			pn = pn->next;
3152		if (pn)
3153			break;
3154	}
3155	state->bucket = bucket;
3156
3157	return pn;
3158}
3159
3160static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3161					    struct pneigh_entry *pn,
3162					    loff_t *pos)
3163{
3164	struct neigh_seq_state *state = seq->private;
3165	struct net *net = seq_file_net(seq);
3166	struct neigh_table *tbl = state->tbl;
3167
3168	do {
3169		pn = pn->next;
3170	} while (pn && !net_eq(pneigh_net(pn), net));
3171
3172	while (!pn) {
3173		if (++state->bucket > PNEIGH_HASHMASK)
3174			break;
3175		pn = tbl->phash_buckets[state->bucket];
3176		while (pn && !net_eq(pneigh_net(pn), net))
3177			pn = pn->next;
3178		if (pn)
3179			break;
3180	}
3181
3182	if (pn && pos)
3183		--(*pos);
3184
3185	return pn;
3186}
3187
3188static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3189{
3190	struct pneigh_entry *pn = pneigh_get_first(seq);
3191
3192	if (pn) {
3193		--(*pos);
3194		while (*pos) {
3195			pn = pneigh_get_next(seq, pn, pos);
3196			if (!pn)
3197				break;
3198		}
3199	}
3200	return *pos ? NULL : pn;
3201}
3202
3203static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3204{
3205	struct neigh_seq_state *state = seq->private;
3206	void *rc;
3207	loff_t idxpos = *pos;
3208
3209	rc = neigh_get_idx(seq, &idxpos);
3210	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3211		rc = pneigh_get_idx(seq, &idxpos);
3212
3213	return rc;
3214}
3215
3216void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3217	__acquires(tbl->lock)
3218	__acquires(rcu_bh)
3219{
3220	struct neigh_seq_state *state = seq->private;
3221
3222	state->tbl = tbl;
3223	state->bucket = 0;
3224	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3225
3226	rcu_read_lock_bh();
3227	state->nht = rcu_dereference_bh(tbl->nht);
3228	read_lock(&tbl->lock);
3229
3230	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3231}
3232EXPORT_SYMBOL(neigh_seq_start);
3233
3234void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3235{
3236	struct neigh_seq_state *state;
3237	void *rc;
3238
3239	if (v == SEQ_START_TOKEN) {
3240		rc = neigh_get_first(seq);
3241		goto out;
3242	}
3243
3244	state = seq->private;
3245	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3246		rc = neigh_get_next(seq, v, NULL);
3247		if (rc)
3248			goto out;
3249		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3250			rc = pneigh_get_first(seq);
3251	} else {
3252		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3253		rc = pneigh_get_next(seq, v, NULL);
3254	}
3255out:
3256	++(*pos);
3257	return rc;
3258}
3259EXPORT_SYMBOL(neigh_seq_next);
3260
3261void neigh_seq_stop(struct seq_file *seq, void *v)
3262	__releases(tbl->lock)
3263	__releases(rcu_bh)
3264{
3265	struct neigh_seq_state *state = seq->private;
3266	struct neigh_table *tbl = state->tbl;
3267
3268	read_unlock(&tbl->lock);
3269	rcu_read_unlock_bh();
3270}
3271EXPORT_SYMBOL(neigh_seq_stop);
3272
3273/* statistics via seq_file */
3274
3275static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3276{
3277	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3278	int cpu;
3279
3280	if (*pos == 0)
3281		return SEQ_START_TOKEN;
3282
3283	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3284		if (!cpu_possible(cpu))
3285			continue;
3286		*pos = cpu+1;
3287		return per_cpu_ptr(tbl->stats, cpu);
3288	}
3289	return NULL;
3290}
3291
3292static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3293{
3294	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3295	int cpu;
3296
3297	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3298		if (!cpu_possible(cpu))
3299			continue;
3300		*pos = cpu+1;
3301		return per_cpu_ptr(tbl->stats, cpu);
3302	}
3303	(*pos)++;
3304	return NULL;
3305}
3306
3307static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3308{
3309
3310}
3311
3312static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3313{
3314	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3315	struct neigh_statistics *st = v;
3316
3317	if (v == SEQ_START_TOKEN) {
3318		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3319		return 0;
3320	}
3321
3322	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
3323			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
3324		   atomic_read(&tbl->entries),
3325
3326		   st->allocs,
3327		   st->destroys,
3328		   st->hash_grows,
3329
3330		   st->lookups,
3331		   st->hits,
3332
3333		   st->res_failed,
3334
3335		   st->rcv_probes_mcast,
3336		   st->rcv_probes_ucast,
3337
3338		   st->periodic_gc_runs,
3339		   st->forced_gc_runs,
3340		   st->unres_discards,
3341		   st->table_fulls
3342		   );
3343
3344	return 0;
3345}
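/* Illustrative note: these counters are exported as /proc/net/stat/<id>,
 * e.g. arp_cache and ndisc_cache. Each line after the header corresponds
 * to one possible CPU and all values are printed in hexadecimal, so an
 * entries column of "000000a3" means 163 cached neighbours.
 */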
3346
3347static const struct seq_operations neigh_stat_seq_ops = {
3348	.start	= neigh_stat_seq_start,
3349	.next	= neigh_stat_seq_next,
3350	.stop	= neigh_stat_seq_stop,
3351	.show	= neigh_stat_seq_show,
3352};
3353#endif /* CONFIG_PROC_FS */
3354
3355static void __neigh_notify(struct neighbour *n, int type, int flags,
3356			   u32 pid)
3357{
3358	struct net *net = dev_net(n->dev);
3359	struct sk_buff *skb;
3360	int err = -ENOBUFS;
3361
3362	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3363	if (skb == NULL)
3364		goto errout;
3365
3366	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3367	if (err < 0) {
3368		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3369		WARN_ON(err == -EMSGSIZE);
3370		kfree_skb(skb);
3371		goto errout;
3372	}
3373	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3374	return;
3375errout:
3376	if (err < 0)
3377		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3378}
3379
3380void neigh_app_ns(struct neighbour *n)
3381{
3382	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3383}
3384EXPORT_SYMBOL(neigh_app_ns);
3385
3386#ifdef CONFIG_SYSCTL
3387static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3388
3389static int proc_unres_qlen(struct ctl_table *ctl, int write,
3390			   void *buffer, size_t *lenp, loff_t *ppos)
3391{
3392	int size, ret;
3393	struct ctl_table tmp = *ctl;
3394
3395	tmp.extra1 = SYSCTL_ZERO;
3396	tmp.extra2 = &unres_qlen_max;
3397	tmp.data = &size;
3398
3399	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3400	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3401
3402	if (write && !ret)
3403		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3404	return ret;
3405}
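/* Illustrative note: the legacy unres_qlen sysctl counts packets while
 * unres_qlen_bytes is the authoritative value, so writing unres_qlen=3
 * stores 3 * SKB_TRUESIZE(ETH_FRAME_LEN) bytes (roughly 3 * 2 KiB; the
 * exact truesize is architecture dependent) and reads divide back the
 * same way, which is why the two views may not round-trip exactly.
 */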
3406
3407static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3408						   int family)
3409{
3410	switch (family) {
3411	case AF_INET:
3412		return __in_dev_arp_parms_get_rcu(dev);
3413	case AF_INET6:
3414		return __in6_dev_nd_parms_get_rcu(dev);
3415	}
3416	return NULL;
3417}
3418
3419static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3420				  int index)
3421{
3422	struct net_device *dev;
3423	int family = neigh_parms_family(p);
3424
3425	rcu_read_lock();
3426	for_each_netdev_rcu(net, dev) {
3427		struct neigh_parms *dst_p =
3428				neigh_get_dev_parms_rcu(dev, family);
3429
3430		if (dst_p && !test_bit(index, dst_p->data_state))
3431			dst_p->data[index] = p->data[index];
3432	}
3433	rcu_read_unlock();
3434}
3435
3436static void neigh_proc_update(struct ctl_table *ctl, int write)
3437{
3438	struct net_device *dev = ctl->extra1;
3439	struct neigh_parms *p = ctl->extra2;
3440	struct net *net = neigh_parms_net(p);
3441	int index = (int *) ctl->data - p->data;
3442
3443	if (!write)
3444		return;
3445
3446	set_bit(index, p->data_state);
3447	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3448		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3449	if (!dev) /* NULL dev means this is default value */
3450		neigh_copy_dflt_parms(net, p, index);
3451}
3452
3453static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3454					   void *buffer, size_t *lenp,
3455					   loff_t *ppos)
3456{
3457	struct ctl_table tmp = *ctl;
3458	int ret;
3459
3460	tmp.extra1 = SYSCTL_ZERO;
3461	tmp.extra2 = SYSCTL_INT_MAX;
3462
3463	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3464	neigh_proc_update(ctl, write);
3465	return ret;
3466}
3467
3468int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3469			size_t *lenp, loff_t *ppos)
3470{
3471	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3472
3473	neigh_proc_update(ctl, write);
3474	return ret;
3475}
3476EXPORT_SYMBOL(neigh_proc_dointvec);
3477
3478int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3479				size_t *lenp, loff_t *ppos)
3480{
3481	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3482
3483	neigh_proc_update(ctl, write);
3484	return ret;
3485}
3486EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3487
3488static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3489					      void *buffer, size_t *lenp,
3490					      loff_t *ppos)
3491{
3492	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3493
3494	neigh_proc_update(ctl, write);
3495	return ret;
3496}
3497
3498int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3499				   void *buffer, size_t *lenp, loff_t *ppos)
3500{
3501	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3502
3503	neigh_proc_update(ctl, write);
3504	return ret;
3505}
3506EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3507
3508static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3509					  void *buffer, size_t *lenp,
3510					  loff_t *ppos)
3511{
3512	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3513
3514	neigh_proc_update(ctl, write);
3515	return ret;
3516}
3517
3518static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3519					  void *buffer, size_t *lenp,
3520					  loff_t *ppos)
3521{
3522	struct neigh_parms *p = ctl->extra2;
3523	int ret;
3524
3525	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3526		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3527	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3528		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3529	else
3530		ret = -1;
3531
3532	if (write && ret == 0) {
3533		/* update reachable_time as well, otherwise, the change will
3534		 * only be effective after the next time neigh_periodic_work
3535		 * decides to recompute it
3536		 */
3537		p->reachable_time =
3538			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3539	}
3540	return ret;
3541}
3542
3543#define NEIGH_PARMS_DATA_OFFSET(index)	\
3544	(&((struct neigh_parms *) 0)->data[index])
3545
3546#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3547	[NEIGH_VAR_ ## attr] = { \
3548		.procname	= name, \
3549		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3550		.maxlen		= sizeof(int), \
3551		.mode		= mval, \
3552		.proc_handler	= proc, \
3553	}
3554
3555#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3556	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3557
3558#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3559	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3560
3561#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3562	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3563
3564#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3565	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3566
3567#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3568	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3569
3570static struct neigh_sysctl_table {
3571	struct ctl_table_header *sysctl_header;
3572	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3573} neigh_sysctl_template __read_mostly = {
3574	.neigh_vars = {
3575		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3576		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3577		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3578		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3579		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3580		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3581		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3582		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3583		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3584		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3585		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3586		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3587		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3588		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3589		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3590		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3591		[NEIGH_VAR_GC_INTERVAL] = {
3592			.procname	= "gc_interval",
3593			.maxlen		= sizeof(int),
3594			.mode		= 0644,
3595			.proc_handler	= proc_dointvec_jiffies,
3596		},
3597		[NEIGH_VAR_GC_THRESH1] = {
3598			.procname	= "gc_thresh1",
3599			.maxlen		= sizeof(int),
3600			.mode		= 0644,
3601			.extra1		= SYSCTL_ZERO,
3602			.extra2		= SYSCTL_INT_MAX,
3603			.proc_handler	= proc_dointvec_minmax,
3604		},
3605		[NEIGH_VAR_GC_THRESH2] = {
3606			.procname	= "gc_thresh2",
3607			.maxlen		= sizeof(int),
3608			.mode		= 0644,
3609			.extra1		= SYSCTL_ZERO,
3610			.extra2		= SYSCTL_INT_MAX,
3611			.proc_handler	= proc_dointvec_minmax,
3612		},
3613		[NEIGH_VAR_GC_THRESH3] = {
3614			.procname	= "gc_thresh3",
3615			.maxlen		= sizeof(int),
3616			.mode		= 0644,
3617			.extra1		= SYSCTL_ZERO,
3618			.extra2		= SYSCTL_INT_MAX,
3619			.proc_handler	= proc_dointvec_minmax,
3620		},
3621		{},
3622	},
3623};
3624
3625int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3626			  proc_handler *handler)
3627{
3628	int i;
3629	struct neigh_sysctl_table *t;
3630	const char *dev_name_source;
3631	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3632	char *p_name;
3633
3634	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3635	if (!t)
3636		goto err;
3637
3638	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3639		t->neigh_vars[i].data += (long) p;
3640		t->neigh_vars[i].extra1 = dev;
3641		t->neigh_vars[i].extra2 = p;
3642	}
3643
3644	if (dev) {
3645		dev_name_source = dev->name;
3646		/* Terminate the table early */
3647		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3648		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3649	} else {
3650		struct neigh_table *tbl = p->tbl;
3651		dev_name_source = "default";
3652		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3653		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3654		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3655		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3656	}
3657
3658	if (handler) {
3659		/* RetransTime */
3660		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3661		/* ReachableTime */
3662		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3663		/* RetransTime (in milliseconds)*/
3664		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3665		/* ReachableTime (in milliseconds) */
3666		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3667	} else {
3668		/* Those handlers will update p->reachable_time after
3669		 * base_reachable_time(_ms) is set to ensure the new timer starts being
3670		 * applied after the next neighbour update instead of waiting for
3671		 * neigh_periodic_work to update its value (can be multiple minutes).
3672		 * So any handler that replaces them should do this as well.
3673		 */
3674		/* ReachableTime */
3675		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3676			neigh_proc_base_reachable_time;
3677		/* ReachableTime (in milliseconds) */
3678		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3679			neigh_proc_base_reachable_time;
3680	}
3681
3682	/* Don't export sysctls to unprivileged users */
3683	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3684		t->neigh_vars[0].procname = NULL;
3685
3686	switch (neigh_parms_family(p)) {
3687	case AF_INET:
3688	      p_name = "ipv4";
3689	      break;
3690	case AF_INET6:
3691	      p_name = "ipv6";
3692	      break;
3693	default:
3694	      BUG();
3695	}
3696
3697	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3698		p_name, dev_name_source);
3699	t->sysctl_header =
3700		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3701	if (!t->sysctl_header)
3702		goto free;
3703
3704	p->sysctl_table = t;
3705	return 0;
3706
3707free:
3708	kfree(t);
3709err:
3710	return -ENOBUFS;
3711}
3712EXPORT_SYMBOL(neigh_sysctl_register);
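/* Illustrative note: for ARP this registers entries such as
 * /proc/sys/net/ipv4/neigh/default/gc_thresh3 and, per device,
 * /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms ("eth0" is an
 * example name); the IPv6 neighbour discovery table appears under
 * net/ipv6/neigh/ with the same layout. The gc_* entries exist only in
 * the "default" directory because per-device tables are terminated early.
 */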
3713
3714void neigh_sysctl_unregister(struct neigh_parms *p)
3715{
3716	if (p->sysctl_table) {
3717		struct neigh_sysctl_table *t = p->sysctl_table;
3718		p->sysctl_table = NULL;
3719		unregister_net_sysctl_table(t->sysctl_header);
3720		kfree(t);
3721	}
3722}
3723EXPORT_SYMBOL(neigh_sysctl_unregister);
3724
3725#endif	/* CONFIG_SYSCTL */
3726
3727static int __init neigh_init(void)
3728{
3729	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3730	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3731	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3732
3733	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3734		      0);
3735	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3736
3737	return 0;
3738}
3739
3740subsys_initcall(neigh_init);
v6.9.4
 106/*
 107 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 108 * It corresponds to default IPv6 settings and is not overridable,
  109 * because it is a really reasonable choice.
 110 */
 111
 112unsigned long neigh_rand_reach_time(unsigned long base)
 113{
 114	return base ? get_random_u32_below(base) + (base >> 1) : 0;
 115}
 116EXPORT_SYMBOL(neigh_rand_reach_time);
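/* Illustrative note: with base = 30 * HZ the result above is uniform over
 * [15 * HZ, 45 * HZ): get_random_u32_below(base) yields [0, 30 * HZ) and
 * the fixed base >> 1 offset adds 15 * HZ, matching the
 * (1/2)*base ... (3/2)*base interval described in the comment.
 */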
 117
 118static void neigh_mark_dead(struct neighbour *n)
 119{
 120	n->dead = 1;
 121	if (!list_empty(&n->gc_list)) {
 122		list_del_init(&n->gc_list);
 123		atomic_dec(&n->tbl->gc_entries);
 124	}
 125	if (!list_empty(&n->managed_list))
 126		list_del_init(&n->managed_list);
 127}
 128
 129static void neigh_update_gc_list(struct neighbour *n)
 130{
 131	bool on_gc_list, exempt_from_gc;
 132
 133	write_lock_bh(&n->tbl->lock);
 134	write_lock(&n->lock);
 135	if (n->dead)
 136		goto out;
 137
 138	/* remove from the gc list if new state is permanent or if neighbor
 139	 * is externally learned; otherwise entry should be on the gc list
 140	 */
 141	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
 142			 n->flags & NTF_EXT_LEARNED;
 143	on_gc_list = !list_empty(&n->gc_list);
 144
 145	if (exempt_from_gc && on_gc_list) {
 146		list_del_init(&n->gc_list);
 147		atomic_dec(&n->tbl->gc_entries);
 148	} else if (!exempt_from_gc && !on_gc_list) {
 149		/* add entries to the tail; cleaning removes from the front */
 150		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 151		atomic_inc(&n->tbl->gc_entries);
 152	}
 153out:
 154	write_unlock(&n->lock);
 155	write_unlock_bh(&n->tbl->lock);
 156}
 157
 158static void neigh_update_managed_list(struct neighbour *n)
 159{
 160	bool on_managed_list, add_to_managed;
 161
 162	write_lock_bh(&n->tbl->lock);
 163	write_lock(&n->lock);
 164	if (n->dead)
 165		goto out;
 166
 167	add_to_managed = n->flags & NTF_MANAGED;
 168	on_managed_list = !list_empty(&n->managed_list);
 169
 170	if (!add_to_managed && on_managed_list)
 171		list_del_init(&n->managed_list);
 172	else if (add_to_managed && !on_managed_list)
 173		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 174out:
 175	write_unlock(&n->lock);
 176	write_unlock_bh(&n->tbl->lock);
 177}
 178
 179static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
 180			       bool *gc_update, bool *managed_update)
 181{
 182	u32 ndm_flags, old_flags = neigh->flags;
 183
 184	if (!(flags & NEIGH_UPDATE_F_ADMIN))
 185		return;
 186
 187	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
 188	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
 189
 190	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
 191		if (ndm_flags & NTF_EXT_LEARNED)
 192			neigh->flags |= NTF_EXT_LEARNED;
 193		else
 194			neigh->flags &= ~NTF_EXT_LEARNED;
 195		*notify = 1;
 196		*gc_update = true;
 197	}
 198	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
 199		if (ndm_flags & NTF_MANAGED)
 200			neigh->flags |= NTF_MANAGED;
 201		else
 202			neigh->flags &= ~NTF_MANAGED;
 203		*notify = 1;
 204		*managed_update = true;
 205	}
 206}
 207
 208static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
 209		      struct neigh_table *tbl)
 210{
 211	bool retval = false;
 212
 213	write_lock(&n->lock);
 214	if (refcount_read(&n->refcnt) == 1) {
 215		struct neighbour *neigh;
 216
 217		neigh = rcu_dereference_protected(n->next,
 218						  lockdep_is_held(&tbl->lock));
 219		rcu_assign_pointer(*np, neigh);
 220		neigh_mark_dead(n);
 221		retval = true;
 222	}
 223	write_unlock(&n->lock);
 224	if (retval)
 225		neigh_cleanup_and_release(n);
 226	return retval;
 227}
 228
 229bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 230{
 231	struct neigh_hash_table *nht;
 232	void *pkey = ndel->primary_key;
 233	u32 hash_val;
 234	struct neighbour *n;
 235	struct neighbour __rcu **np;
 236
 237	nht = rcu_dereference_protected(tbl->nht,
 238					lockdep_is_held(&tbl->lock));
 239	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
 240	hash_val = hash_val >> (32 - nht->hash_shift);
 241
 242	np = &nht->hash_buckets[hash_val];
 243	while ((n = rcu_dereference_protected(*np,
 244					      lockdep_is_held(&tbl->lock)))) {
 245		if (n == ndel)
 246			return neigh_del(n, np, tbl);
 247		np = &n->next;
 248	}
 249	return false;
 250}
 251
 252static int neigh_forced_gc(struct neigh_table *tbl)
 253{
 254	int max_clean = atomic_read(&tbl->gc_entries) -
 255			READ_ONCE(tbl->gc_thresh2);
 256	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
 257	unsigned long tref = jiffies - 5 * HZ;
 258	struct neighbour *n, *tmp;
 259	int shrunk = 0;
 260	int loop = 0;
 261
 262	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 263
 264	write_lock_bh(&tbl->lock);
 265
 266	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
 267		if (refcount_read(&n->refcnt) == 1) {
 268			bool remove = false;
 269
 270			write_lock(&n->lock);
 271			if ((n->nud_state == NUD_FAILED) ||
 272			    (n->nud_state == NUD_NOARP) ||
 273			    (tbl->is_multicast &&
 274			     tbl->is_multicast(n->primary_key)) ||
 275			    !time_in_range(n->updated, tref, jiffies))
 276				remove = true;
 277			write_unlock(&n->lock);
 278
 279			if (remove && neigh_remove_one(n, tbl))
 280				shrunk++;
 281			if (shrunk >= max_clean)
 282				break;
 283			if (++loop == 16) {
 284				if (ktime_get_ns() > tmax)
 285					goto unlock;
 286				loop = 0;
 287			}
 288		}
 289	}
 290
 291	WRITE_ONCE(tbl->last_flush, jiffies);
 292unlock:
 293	write_unlock_bh(&tbl->lock);
 294
 295	return shrunk;
 296}
 297
 298static void neigh_add_timer(struct neighbour *n, unsigned long when)
 299{
  300	/* Keep a safe distance from the jiffies - LONG_MAX point while the
  301	 * timer is running in DELAY/PROBE state, but still show user space
  302	 * large times in the past.
  303	 */
 304	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
 305
 306	neigh_hold(n);
 307	if (!time_in_range(n->confirmed, mint, jiffies))
 308		n->confirmed = mint;
 309	if (time_before(n->used, n->confirmed))
 310		n->used = n->confirmed;
 311	if (unlikely(mod_timer(&n->timer, when))) {
 312		printk("NEIGH: BUG, double timer add, state is %x\n",
 313		       n->nud_state);
 314		dump_stack();
 315	}
 316}
 317
 318static int neigh_del_timer(struct neighbour *n)
 319{
 320	if ((n->nud_state & NUD_IN_TIMER) &&
 321	    del_timer(&n->timer)) {
 322		neigh_release(n);
 323		return 1;
 324	}
 325	return 0;
 326}
 327
 328static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
 329						   int family)
 330{
 331	switch (family) {
 332	case AF_INET:
 333		return __in_dev_arp_parms_get_rcu(dev);
 334	case AF_INET6:
 335		return __in6_dev_nd_parms_get_rcu(dev);
 336	}
 337	return NULL;
 338}
 339
 340static void neigh_parms_qlen_dec(struct net_device *dev, int family)
 341{
 342	struct neigh_parms *p;
 343
 344	rcu_read_lock();
 345	p = neigh_get_dev_parms_rcu(dev, family);
 346	if (p)
 347		p->qlen--;
 348	rcu_read_unlock();
 349}
 350
 351static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
 352			       int family)
 353{
 354	struct sk_buff_head tmp;
 355	unsigned long flags;
 356	struct sk_buff *skb;
 357
 358	skb_queue_head_init(&tmp);
 359	spin_lock_irqsave(&list->lock, flags);
 360	skb = skb_peek(list);
 361	while (skb != NULL) {
 362		struct sk_buff *skb_next = skb_peek_next(skb, list);
 363		struct net_device *dev = skb->dev;
 364
 365		if (net == NULL || net_eq(dev_net(dev), net)) {
 366			neigh_parms_qlen_dec(dev, family);
 367			__skb_unlink(skb, list);
 368			__skb_queue_tail(&tmp, skb);
 369		}
 370		skb = skb_next;
 371	}
 372	spin_unlock_irqrestore(&list->lock, flags);
 373
 374	while ((skb = __skb_dequeue(&tmp))) {
 375		dev_put(skb->dev);
 376		kfree_skb(skb);
 377	}
 378}
 379
 380static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
 381			    bool skip_perm)
 382{
 383	int i;
 384	struct neigh_hash_table *nht;
 385
 386	nht = rcu_dereference_protected(tbl->nht,
 387					lockdep_is_held(&tbl->lock));
 388
 389	for (i = 0; i < (1 << nht->hash_shift); i++) {
 390		struct neighbour *n;
 391		struct neighbour __rcu **np = &nht->hash_buckets[i];
 392
 393		while ((n = rcu_dereference_protected(*np,
 394					lockdep_is_held(&tbl->lock))) != NULL) {
 395			if (dev && n->dev != dev) {
 396				np = &n->next;
 397				continue;
 398			}
 399			if (skip_perm && n->nud_state & NUD_PERMANENT) {
 400				np = &n->next;
 401				continue;
 402			}
 403			rcu_assign_pointer(*np,
 404				   rcu_dereference_protected(n->next,
 405						lockdep_is_held(&tbl->lock)));
 406			write_lock(&n->lock);
 407			neigh_del_timer(n);
 408			neigh_mark_dead(n);
 409			if (refcount_read(&n->refcnt) != 1) {
  410				/* The most unpleasant situation:
  411				   we must destroy the neighbour entry,
  412				   but someone still uses it.
  413
  414				   Destruction will be delayed until
  415				   the last user releases the entry, but
  416				   we must kill the timers etc. and move
  417				   it to a safe state.
  418				 */
 419				__skb_queue_purge(&n->arp_queue);
 420				n->arp_queue_len_bytes = 0;
 421				WRITE_ONCE(n->output, neigh_blackhole);
 422				if (n->nud_state & NUD_VALID)
 423					n->nud_state = NUD_NOARP;
 424				else
 425					n->nud_state = NUD_NONE;
 426				neigh_dbg(2, "neigh %p is stray\n", n);
 427			}
 428			write_unlock(&n->lock);
 429			neigh_cleanup_and_release(n);
 430		}
 431	}
 432}
 433
 434void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 435{
 436	write_lock_bh(&tbl->lock);
 437	neigh_flush_dev(tbl, dev, false);
 438	write_unlock_bh(&tbl->lock);
 439}
 440EXPORT_SYMBOL(neigh_changeaddr);
 441
 442static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 443			  bool skip_perm)
 444{
 445	write_lock_bh(&tbl->lock);
 446	neigh_flush_dev(tbl, dev, skip_perm);
 447	pneigh_ifdown_and_unlock(tbl, dev);
 448	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
 449			   tbl->family);
 450	if (skb_queue_empty_lockless(&tbl->proxy_queue))
 451		del_timer_sync(&tbl->proxy_timer);
 452	return 0;
 453}
 454
 455int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
 456{
 457	__neigh_ifdown(tbl, dev, true);
 458	return 0;
 459}
 460EXPORT_SYMBOL(neigh_carrier_down);
 461
 462int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 463{
 464	__neigh_ifdown(tbl, dev, false);
 465	return 0;
 466}
 467EXPORT_SYMBOL(neigh_ifdown);
 468
 469static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 470				     struct net_device *dev,
 471				     u32 flags, bool exempt_from_gc)
 472{
 473	struct neighbour *n = NULL;
 474	unsigned long now = jiffies;
 475	int entries, gc_thresh3;
 476
 477	if (exempt_from_gc)
 478		goto do_alloc;
 479
 480	entries = atomic_inc_return(&tbl->gc_entries) - 1;
 481	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
 482	if (entries >= gc_thresh3 ||
 483	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
 484	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
 485		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
 486			net_info_ratelimited("%s: neighbor table overflow!\n",
 487					     tbl->id);
 488			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 489			goto out_entries;
 490		}
 491	}
 492
 493do_alloc:
 494	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 495	if (!n)
 496		goto out_entries;
 497
 498	__skb_queue_head_init(&n->arp_queue);
 499	rwlock_init(&n->lock);
 500	seqlock_init(&n->ha_lock);
 501	n->updated	  = n->used = now;
 502	n->nud_state	  = NUD_NONE;
 503	n->output	  = neigh_blackhole;
 504	n->flags	  = flags;
 505	seqlock_init(&n->hh.hh_lock);
 506	n->parms	  = neigh_parms_clone(&tbl->parms);
 507	timer_setup(&n->timer, neigh_timer_handler, 0);
 508
 509	NEIGH_CACHE_STAT_INC(tbl, allocs);
 510	n->tbl		  = tbl;
 511	refcount_set(&n->refcnt, 1);
 512	n->dead		  = 1;
 513	INIT_LIST_HEAD(&n->gc_list);
 514	INIT_LIST_HEAD(&n->managed_list);
 515
 516	atomic_inc(&tbl->entries);
 517out:
 518	return n;
 519
 520out_entries:
 521	if (!exempt_from_gc)
 522		atomic_dec(&tbl->gc_entries);
 523	goto out;
 524}
 525
 526static void neigh_get_hash_rnd(u32 *x)
 527{
 528	*x = get_random_u32() | 1;
 529}
 530
 531static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 532{
 533	size_t size = (1 << shift) * sizeof(struct neighbour *);
 534	struct neigh_hash_table *ret;
 535	struct neighbour __rcu **buckets;
 536	int i;
 537
 538	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 539	if (!ret)
 540		return NULL;
 541	if (size <= PAGE_SIZE) {
 542		buckets = kzalloc(size, GFP_ATOMIC);
 543	} else {
 544		buckets = (struct neighbour __rcu **)
 545			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 546					   get_order(size));
 547		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
 548	}
 549	if (!buckets) {
 550		kfree(ret);
 551		return NULL;
 552	}
 553	ret->hash_buckets = buckets;
 554	ret->hash_shift = shift;
 555	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 556		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 557	return ret;
 558}
 559
 560static void neigh_hash_free_rcu(struct rcu_head *head)
 561{
 562	struct neigh_hash_table *nht = container_of(head,
 563						    struct neigh_hash_table,
 564						    rcu);
 565	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 566	struct neighbour __rcu **buckets = nht->hash_buckets;
 567
 568	if (size <= PAGE_SIZE) {
 569		kfree(buckets);
 570	} else {
 571		kmemleak_free(buckets);
 572		free_pages((unsigned long)buckets, get_order(size));
 573	}
 574	kfree(nht);
 575}
 576
 577static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 578						unsigned long new_shift)
 579{
 580	unsigned int i, hash;
 581	struct neigh_hash_table *new_nht, *old_nht;
 582
 583	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 584
 585	old_nht = rcu_dereference_protected(tbl->nht,
 586					    lockdep_is_held(&tbl->lock));
 587	new_nht = neigh_hash_alloc(new_shift);
 588	if (!new_nht)
 589		return old_nht;
 590
 591	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 592		struct neighbour *n, *next;
 593
 594		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 595						   lockdep_is_held(&tbl->lock));
 596		     n != NULL;
 597		     n = next) {
 598			hash = tbl->hash(n->primary_key, n->dev,
 599					 new_nht->hash_rnd);
 600
 601			hash >>= (32 - new_nht->hash_shift);
 602			next = rcu_dereference_protected(n->next,
 603						lockdep_is_held(&tbl->lock));
 604
 605			rcu_assign_pointer(n->next,
 606					   rcu_dereference_protected(
 607						new_nht->hash_buckets[hash],
 608						lockdep_is_held(&tbl->lock)));
 609			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 610		}
 611	}
 612
 613	rcu_assign_pointer(tbl->nht, new_nht);
 614	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 615	return new_nht;
 616}
 617
 618struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 619			       struct net_device *dev)
 620{
 621	struct neighbour *n;
 622
 623	NEIGH_CACHE_STAT_INC(tbl, lookups);
 624
 625	rcu_read_lock();
 626	n = __neigh_lookup_noref(tbl, pkey, dev);
 627	if (n) {
 628		if (!refcount_inc_not_zero(&n->refcnt))
 629			n = NULL;
 630		NEIGH_CACHE_STAT_INC(tbl, hits);
 631	}
 632
 633	rcu_read_unlock();
 634	return n;
 635}
 636EXPORT_SYMBOL(neigh_lookup);
 637
 638static struct neighbour *
 639___neigh_create(struct neigh_table *tbl, const void *pkey,
 640		struct net_device *dev, u32 flags,
 641		bool exempt_from_gc, bool want_ref)
 642{
 643	u32 hash_val, key_len = tbl->key_len;
 644	struct neighbour *n1, *rc, *n;
 645	struct neigh_hash_table *nht;
 646	int error;
 647
 648	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
 649	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
 650	if (!n) {
 651		rc = ERR_PTR(-ENOBUFS);
 652		goto out;
 653	}
 654
 655	memcpy(n->primary_key, pkey, key_len);
 656	n->dev = dev;
 657	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
 658
 659	/* Protocol specific setup. */
 660	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 661		rc = ERR_PTR(error);
 662		goto out_neigh_release;
 663	}
 664
 665	if (dev->netdev_ops->ndo_neigh_construct) {
 666		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 667		if (error < 0) {
 668			rc = ERR_PTR(error);
 669			goto out_neigh_release;
 670		}
 671	}
 672
 673	/* Device specific setup. */
 674	if (n->parms->neigh_setup &&
 675	    (error = n->parms->neigh_setup(n)) < 0) {
 676		rc = ERR_PTR(error);
 677		goto out_neigh_release;
 678	}
 679
 680	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 681
 682	write_lock_bh(&tbl->lock);
 683	nht = rcu_dereference_protected(tbl->nht,
 684					lockdep_is_held(&tbl->lock));
 685
 686	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 687		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 688
 689	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 690
 691	if (n->parms->dead) {
 692		rc = ERR_PTR(-EINVAL);
 693		goto out_tbl_unlock;
 694	}
 695
 696	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 697					    lockdep_is_held(&tbl->lock));
 698	     n1 != NULL;
 699	     n1 = rcu_dereference_protected(n1->next,
 700			lockdep_is_held(&tbl->lock))) {
 701		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 702			if (want_ref)
 703				neigh_hold(n1);
 704			rc = n1;
 705			goto out_tbl_unlock;
 706		}
 707	}
 708
 709	n->dead = 0;
 710	if (!exempt_from_gc)
 711		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 712	if (n->flags & NTF_MANAGED)
 713		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 714	if (want_ref)
 715		neigh_hold(n);
 716	rcu_assign_pointer(n->next,
 717			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 718						     lockdep_is_held(&tbl->lock)));
 719	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 720	write_unlock_bh(&tbl->lock);
 721	neigh_dbg(2, "neigh %p is created\n", n);
 722	rc = n;
 723out:
 724	return rc;
 725out_tbl_unlock:
 726	write_unlock_bh(&tbl->lock);
 727out_neigh_release:
 728	if (!exempt_from_gc)
 729		atomic_dec(&tbl->gc_entries);
 730	neigh_release(n);
 731	goto out;
 732}
 733
 734struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 735				 struct net_device *dev, bool want_ref)
 736{
 737	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
 738}
 739EXPORT_SYMBOL(__neigh_create);
 740
 741static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 742{
 743	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 744	hash_val ^= (hash_val >> 16);
 745	hash_val ^= hash_val >> 8;
 746	hash_val ^= hash_val >> 4;
 747	hash_val &= PNEIGH_HASHMASK;
 748	return hash_val;
 749}
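/* Illustrative sketch (an assumption, not part of the original file): for
 * AF_INET the key is 4 bytes long, so the 32-bit tail taken above is the
 * whole IPv4 address and the xor-folds reduce it to one of the
 * PNEIGH_HASHMASK + 1 == 16 proxy hash buckets:
 *
 *	u32 v = *(u32 *)&ip4;		// hypothetical IPv4 key
 *	v ^= v >> 16;
 *	v ^= v >> 8;
 *	v ^= v >> 4;
 *	v &= PNEIGH_HASHMASK;		// bucket index in 0..15
 */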
 750
 751static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 752					      struct net *net,
 753					      const void *pkey,
 754					      unsigned int key_len,
 755					      struct net_device *dev)
 756{
 757	while (n) {
 758		if (!memcmp(n->key, pkey, key_len) &&
 759		    net_eq(pneigh_net(n), net) &&
 760		    (n->dev == dev || !n->dev))
 761			return n;
 762		n = n->next;
 763	}
 764	return NULL;
 765}
 766
 767struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 768		struct net *net, const void *pkey, struct net_device *dev)
 769{
 770	unsigned int key_len = tbl->key_len;
 771	u32 hash_val = pneigh_hash(pkey, key_len);
 772
 773	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 774				 net, pkey, key_len, dev);
 775}
 776EXPORT_SYMBOL_GPL(__pneigh_lookup);
 777
 778struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 779				    struct net *net, const void *pkey,
 780				    struct net_device *dev, int creat)
 781{
 782	struct pneigh_entry *n;
 783	unsigned int key_len = tbl->key_len;
 784	u32 hash_val = pneigh_hash(pkey, key_len);
 785
 786	read_lock_bh(&tbl->lock);
 787	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 788			      net, pkey, key_len, dev);
 789	read_unlock_bh(&tbl->lock);
 790
 791	if (n || !creat)
 792		goto out;
 793
 794	ASSERT_RTNL();
 795
 796	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
 797	if (!n)
 798		goto out;
 799
 800	write_pnet(&n->net, net);
 801	memcpy(n->key, pkey, key_len);
 802	n->dev = dev;
 803	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
 804
 805	if (tbl->pconstructor && tbl->pconstructor(n)) {
 806		netdev_put(dev, &n->dev_tracker);
 807		kfree(n);
 808		n = NULL;
 809		goto out;
 810	}
 811
 812	write_lock_bh(&tbl->lock);
 813	n->next = tbl->phash_buckets[hash_val];
 814	tbl->phash_buckets[hash_val] = n;
 815	write_unlock_bh(&tbl->lock);
 816out:
 817	return n;
 818}
 819EXPORT_SYMBOL(pneigh_lookup);
 820
 821
 822int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 823		  struct net_device *dev)
 824{
 825	struct pneigh_entry *n, **np;
 826	unsigned int key_len = tbl->key_len;
 827	u32 hash_val = pneigh_hash(pkey, key_len);
 828
 829	write_lock_bh(&tbl->lock);
 830	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 831	     np = &n->next) {
 832		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 833		    net_eq(pneigh_net(n), net)) {
 834			*np = n->next;
 835			write_unlock_bh(&tbl->lock);
 836			if (tbl->pdestructor)
 837				tbl->pdestructor(n);
 838			netdev_put(n->dev, &n->dev_tracker);
 839			kfree(n);
 840			return 0;
 841		}
 842	}
 843	write_unlock_bh(&tbl->lock);
 844	return -ENOENT;
 845}
 846
 847static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 848				    struct net_device *dev)
 849{
 850	struct pneigh_entry *n, **np, *freelist = NULL;
 851	u32 h;
 852
 853	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 854		np = &tbl->phash_buckets[h];
 855		while ((n = *np) != NULL) {
 856			if (!dev || n->dev == dev) {
 857				*np = n->next;
 858				n->next = freelist;
 859				freelist = n;
 860				continue;
 861			}
 862			np = &n->next;
 863		}
 864	}
 865	write_unlock_bh(&tbl->lock);
 866	while ((n = freelist)) {
 867		freelist = n->next;
 868		n->next = NULL;
 869		if (tbl->pdestructor)
 870			tbl->pdestructor(n);
 871		netdev_put(n->dev, &n->dev_tracker);
 872		kfree(n);
 873	}
 874	return -ENOENT;
 875}
 876
 877static void neigh_parms_destroy(struct neigh_parms *parms);
 878
 879static inline void neigh_parms_put(struct neigh_parms *parms)
 880{
 881	if (refcount_dec_and_test(&parms->refcnt))
 882		neigh_parms_destroy(parms);
 883}
 884
  885/*
  886 *	The neighbour must already be out of the table.
  887 *
  888 */
 889void neigh_destroy(struct neighbour *neigh)
 890{
 891	struct net_device *dev = neigh->dev;
 892
 893	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 894
 895	if (!neigh->dead) {
 896		pr_warn("Destroying alive neighbour %p\n", neigh);
 897		dump_stack();
 898		return;
 899	}
 900
 901	if (neigh_del_timer(neigh))
 902		pr_warn("Impossible event\n");
 903
 904	write_lock_bh(&neigh->lock);
 905	__skb_queue_purge(&neigh->arp_queue);
 906	write_unlock_bh(&neigh->lock);
 907	neigh->arp_queue_len_bytes = 0;
 908
 909	if (dev->netdev_ops->ndo_neigh_destroy)
 910		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 911
 912	netdev_put(dev, &neigh->dev_tracker);
 913	neigh_parms_put(neigh->parms);
 914
 915	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 916
 917	atomic_dec(&neigh->tbl->entries);
 918	kfree_rcu(neigh, rcu);
 919}
 920EXPORT_SYMBOL(neigh_destroy);
 921
 922/* Neighbour state is suspicious;
 923   disable fast path.
 924
 925   Called with write_locked neigh.
 926 */
 927static void neigh_suspect(struct neighbour *neigh)
 928{
 929	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 930
 931	WRITE_ONCE(neigh->output, neigh->ops->output);
 932}
 933
 934/* Neighbour state is OK;
 935   enable fast path.
 936
 937   Called with write_locked neigh.
 938 */
 939static void neigh_connect(struct neighbour *neigh)
 940{
 941	neigh_dbg(2, "neigh %p is connected\n", neigh);
 942
 943	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
 944}
 945
 946static void neigh_periodic_work(struct work_struct *work)
 947{
 948	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 949	struct neighbour *n;
 950	struct neighbour __rcu **np;
 951	unsigned int i;
 952	struct neigh_hash_table *nht;
 953
 954	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 955
 956	write_lock_bh(&tbl->lock);
 957	nht = rcu_dereference_protected(tbl->nht,
 958					lockdep_is_held(&tbl->lock));
 959
 960	/*
 961	 *	periodically recompute ReachableTime from random function
 962	 */
 963
 964	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 965		struct neigh_parms *p;
 966
 967		WRITE_ONCE(tbl->last_rand, jiffies);
 968		list_for_each_entry(p, &tbl->parms_list, list)
 969			p->reachable_time =
 970				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 971	}
 972
 973	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
 974		goto out;
 975
 976	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 977		np = &nht->hash_buckets[i];
 978
 979		while ((n = rcu_dereference_protected(*np,
 980				lockdep_is_held(&tbl->lock))) != NULL) {
 981			unsigned int state;
 982
 983			write_lock(&n->lock);
 984
 985			state = n->nud_state;
 986			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
 987			    (n->flags & NTF_EXT_LEARNED)) {
 988				write_unlock(&n->lock);
 989				goto next_elt;
 990			}
 991
 992			if (time_before(n->used, n->confirmed) &&
 993			    time_is_before_eq_jiffies(n->confirmed))
 994				n->used = n->confirmed;
 995
 996			if (refcount_read(&n->refcnt) == 1 &&
 997			    (state == NUD_FAILED ||
 998			     !time_in_range_open(jiffies, n->used,
 999						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
1000				rcu_assign_pointer(*np,
1001					rcu_dereference_protected(n->next,
1002						lockdep_is_held(&tbl->lock)));
1003				neigh_mark_dead(n);
1004				write_unlock(&n->lock);
1005				neigh_cleanup_and_release(n);
1006				continue;
1007			}
1008			write_unlock(&n->lock);
1009
1010next_elt:
1011			np = &n->next;
1012		}
1013		/*
1014		 * It's fine to release lock here, even if hash table
1015		 * grows while we are preempted.
1016		 */
1017		write_unlock_bh(&tbl->lock);
1018		cond_resched();
1019		write_lock_bh(&tbl->lock);
1020		nht = rcu_dereference_protected(tbl->nht,
1021						lockdep_is_held(&tbl->lock));
1022	}
1023out:
1024	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
1025	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
1026	 * BASE_REACHABLE_TIME.
1027	 */
1028	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1029			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
1030	write_unlock_bh(&tbl->lock);
1031}
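/* Worked example for the scheduling comment above (illustrative only):
 * with a BASE_REACHABLE_TIME of 30 seconds the delayed work is requeued
 * every 15 seconds, while neigh_rand_reach_time() keeps per-entry
 * reachability timeouts between 15 and 45 seconds, so a full rescan period
 * is never longer than the shortest per-entry timeout.
 */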
1032
1033static __inline__ int neigh_max_probes(struct neighbour *n)
1034{
1035	struct neigh_parms *p = n->parms;
1036	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1037	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1038	        NEIGH_VAR(p, MCAST_PROBES));
1039}
1040
1041static void neigh_invalidate(struct neighbour *neigh)
1042	__releases(neigh->lock)
1043	__acquires(neigh->lock)
1044{
1045	struct sk_buff *skb;
1046
1047	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1048	neigh_dbg(2, "neigh %p is failed\n", neigh);
1049	neigh->updated = jiffies;
1050
 1051	/* This is a very delicate place. report_unreachable is a very
 1052	   complicated routine. In particular, it can hit the same neighbour entry!
 1053
 1054	   So we try to be careful and avoid an endless loop. --ANK
 1055	 */
1056	while (neigh->nud_state == NUD_FAILED &&
1057	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1058		write_unlock(&neigh->lock);
1059		neigh->ops->error_report(neigh, skb);
1060		write_lock(&neigh->lock);
1061	}
1062	__skb_queue_purge(&neigh->arp_queue);
1063	neigh->arp_queue_len_bytes = 0;
1064}
1065
1066static void neigh_probe(struct neighbour *neigh)
1067	__releases(neigh->lock)
1068{
1069	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1070	/* keep skb alive even if arp_queue overflows */
1071	if (skb)
1072		skb = skb_clone(skb, GFP_ATOMIC);
1073	write_unlock(&neigh->lock);
1074	if (neigh->ops->solicit)
1075		neigh->ops->solicit(neigh, skb);
1076	atomic_inc(&neigh->probes);
1077	consume_skb(skb);
1078}
1079
1080/* Called when a timer expires for a neighbour entry. */
1081
1082static void neigh_timer_handler(struct timer_list *t)
1083{
1084	unsigned long now, next;
1085	struct neighbour *neigh = from_timer(neigh, t, timer);
1086	unsigned int state;
1087	int notify = 0;
1088
1089	write_lock(&neigh->lock);
1090
1091	state = neigh->nud_state;
1092	now = jiffies;
1093	next = now + HZ;
1094
1095	if (!(state & NUD_IN_TIMER))
1096		goto out;
1097
1098	if (state & NUD_REACHABLE) {
1099		if (time_before_eq(now,
1100				   neigh->confirmed + neigh->parms->reachable_time)) {
1101			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1102			next = neigh->confirmed + neigh->parms->reachable_time;
1103		} else if (time_before_eq(now,
1104					  neigh->used +
1105					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1106			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1107			WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1108			neigh->updated = jiffies;
1109			neigh_suspect(neigh);
1110			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1111		} else {
1112			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1113			WRITE_ONCE(neigh->nud_state, NUD_STALE);
1114			neigh->updated = jiffies;
1115			neigh_suspect(neigh);
1116			notify = 1;
1117		}
1118	} else if (state & NUD_DELAY) {
1119		if (time_before_eq(now,
1120				   neigh->confirmed +
1121				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1122			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1123			WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1124			neigh->updated = jiffies;
1125			neigh_connect(neigh);
1126			notify = 1;
1127			next = neigh->confirmed + neigh->parms->reachable_time;
1128		} else {
1129			neigh_dbg(2, "neigh %p is probed\n", neigh);
1130			WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1131			neigh->updated = jiffies;
1132			atomic_set(&neigh->probes, 0);
1133			notify = 1;
1134			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1135					 HZ/100);
1136		}
1137	} else {
1138		/* NUD_PROBE|NUD_INCOMPLETE */
1139		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1140	}
1141
1142	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1143	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1144		WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1145		notify = 1;
1146		neigh_invalidate(neigh);
1147		goto out;
1148	}
1149
1150	if (neigh->nud_state & NUD_IN_TIMER) {
1151		if (time_before(next, jiffies + HZ/100))
1152			next = jiffies + HZ/100;
1153		if (!mod_timer(&neigh->timer, next))
1154			neigh_hold(neigh);
1155	}
1156	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1157		neigh_probe(neigh);
1158	} else {
1159out:
1160		write_unlock(&neigh->lock);
1161	}
1162
1163	if (notify)
1164		neigh_update_notify(neigh, 0);
1165
1166	trace_neigh_timer_handler(neigh, 0);
1167
1168	neigh_release(neigh);
1169}
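/* Summary of the transitions driven above (an illustrative sketch derived
 * from the code, not an exhaustive NUD state diagram):
 *
 *	NUD_REACHABLE  -> NUD_DELAY / NUD_STALE  when confirmation ages out
 *	NUD_DELAY      -> NUD_REACHABLE          if reconfirmed in time
 *	NUD_DELAY      -> NUD_PROBE              otherwise
 *	NUD_PROBE / NUD_INCOMPLETE -> NUD_FAILED after neigh_max_probes()
 */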
1170
1171int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1172		       const bool immediate_ok)
1173{
1174	int rc;
1175	bool immediate_probe = false;
1176
1177	write_lock_bh(&neigh->lock);
1178
1179	rc = 0;
1180	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1181		goto out_unlock_bh;
1182	if (neigh->dead)
1183		goto out_dead;
1184
1185	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1186		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1187		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1188			unsigned long next, now = jiffies;
1189
1190			atomic_set(&neigh->probes,
1191				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1192			neigh_del_timer(neigh);
1193			WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1194			neigh->updated = now;
1195			if (!immediate_ok) {
1196				next = now + 1;
1197			} else {
1198				immediate_probe = true;
1199				next = now + max(NEIGH_VAR(neigh->parms,
1200							   RETRANS_TIME),
1201						 HZ / 100);
1202			}
1203			neigh_add_timer(neigh, next);
1204		} else {
1205			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1206			neigh->updated = jiffies;
1207			write_unlock_bh(&neigh->lock);
1208
1209			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1210			return 1;
1211		}
1212	} else if (neigh->nud_state & NUD_STALE) {
1213		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1214		neigh_del_timer(neigh);
1215		WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1216		neigh->updated = jiffies;
1217		neigh_add_timer(neigh, jiffies +
1218				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1219	}
1220
1221	if (neigh->nud_state == NUD_INCOMPLETE) {
1222		if (skb) {
1223			while (neigh->arp_queue_len_bytes + skb->truesize >
1224			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1225				struct sk_buff *buff;
1226
1227				buff = __skb_dequeue(&neigh->arp_queue);
1228				if (!buff)
1229					break;
1230				neigh->arp_queue_len_bytes -= buff->truesize;
1231				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1232				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1233			}
1234			skb_dst_force(skb);
1235			__skb_queue_tail(&neigh->arp_queue, skb);
1236			neigh->arp_queue_len_bytes += skb->truesize;
1237		}
1238		rc = 1;
1239	}
1240out_unlock_bh:
1241	if (immediate_probe)
1242		neigh_probe(neigh);
1243	else
1244		write_unlock(&neigh->lock);
1245	local_bh_enable();
1246	trace_neigh_event_send_done(neigh, rc);
1247	return rc;
1248
1249out_dead:
1250	if (neigh->nud_state & NUD_STALE)
1251		goto out_unlock_bh;
1252	write_unlock_bh(&neigh->lock);
1253	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1254	trace_neigh_event_send_dead(neigh, 1);
1255	return 1;
1256}
1257EXPORT_SYMBOL(__neigh_event_send);
1258
1259static void neigh_update_hhs(struct neighbour *neigh)
1260{
1261	struct hh_cache *hh;
1262	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1263		= NULL;
1264
1265	if (neigh->dev->header_ops)
1266		update = neigh->dev->header_ops->cache_update;
1267
1268	if (update) {
1269		hh = &neigh->hh;
1270		if (READ_ONCE(hh->hh_len)) {
1271			write_seqlock_bh(&hh->hh_lock);
1272			update(hh, neigh->dev, neigh->ha);
1273			write_sequnlock_bh(&hh->hh_lock);
1274		}
1275	}
1276}
1277
 1278/* Generic update routine.
 1279   -- lladdr is the new lladdr, or NULL if it is not supplied.
 1280   -- new    is the new state.
 1281   -- flags
 1282	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
 1283				if it is different.
 1284	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
 1285				lladdr instead of overriding it
 1286				if it is different.
 1287	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1288	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
 1289	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
 1290	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
 1291				NTF_ROUTER flag.
 1292	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
 1293				a router.
 1294
 1295   Caller MUST hold a reference count on the entry.
 1296 */
1297static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1298			  u8 new, u32 flags, u32 nlmsg_pid,
1299			  struct netlink_ext_ack *extack)
1300{
1301	bool gc_update = false, managed_update = false;
1302	int update_isrouter = 0;
1303	struct net_device *dev;
1304	int err, notify = 0;
1305	u8 old;
1306
1307	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1308
1309	write_lock_bh(&neigh->lock);
1310
1311	dev    = neigh->dev;
1312	old    = neigh->nud_state;
1313	err    = -EPERM;
1314
1315	if (neigh->dead) {
1316		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1317		new = old;
1318		goto out;
1319	}
1320	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1321	    (old & (NUD_NOARP | NUD_PERMANENT)))
1322		goto out;
1323
1324	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1325	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1326		new = old & ~NUD_PERMANENT;
1327		WRITE_ONCE(neigh->nud_state, new);
1328		err = 0;
1329		goto out;
1330	}
1331
1332	if (!(new & NUD_VALID)) {
1333		neigh_del_timer(neigh);
1334		if (old & NUD_CONNECTED)
1335			neigh_suspect(neigh);
1336		WRITE_ONCE(neigh->nud_state, new);
1337		err = 0;
1338		notify = old & NUD_VALID;
1339		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1340		    (new & NUD_FAILED)) {
1341			neigh_invalidate(neigh);
1342			notify = 1;
1343		}
1344		goto out;
1345	}
1346
1347	/* Compare new lladdr with cached one */
1348	if (!dev->addr_len) {
1349		/* First case: device needs no address. */
1350		lladdr = neigh->ha;
1351	} else if (lladdr) {
1352		/* The second case: if something is already cached
1353		   and a new address is proposed:
1354		   - compare new & old
1355		   - if they are different, check override flag
1356		 */
1357		if ((old & NUD_VALID) &&
1358		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1359			lladdr = neigh->ha;
1360	} else {
1361		/* No address is supplied; if we know something,
1362		   use it, otherwise discard the request.
1363		 */
1364		err = -EINVAL;
1365		if (!(old & NUD_VALID)) {
1366			NL_SET_ERR_MSG(extack, "No link layer address given");
1367			goto out;
1368		}
1369		lladdr = neigh->ha;
1370	}
1371
 1372	/* Update the confirmed timestamp for the neighbour entry after we
 1373	 * received an ARP packet, even if it doesn't change the IP-to-MAC binding.
 1374	 */
1375	if (new & NUD_CONNECTED)
1376		neigh->confirmed = jiffies;
1377
 1378	/* If the entry was valid and the address has not changed,
 1379	   do not change the entry state if the new one is STALE.
 1380	 */
1381	err = 0;
1382	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1383	if (old & NUD_VALID) {
1384		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1385			update_isrouter = 0;
1386			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1387			    (old & NUD_CONNECTED)) {
1388				lladdr = neigh->ha;
1389				new = NUD_STALE;
1390			} else
1391				goto out;
1392		} else {
1393			if (lladdr == neigh->ha && new == NUD_STALE &&
1394			    !(flags & NEIGH_UPDATE_F_ADMIN))
1395				new = old;
1396		}
1397	}
1398
 1399	/* Update the timestamp only once we know we will make a change to the
 1400	 * neighbour entry. Otherwise we risk moving the locktime window with
 1401	 * no-op updates and ignoring relevant ARP updates.
 1402	 */
1403	if (new != old || lladdr != neigh->ha)
1404		neigh->updated = jiffies;
1405
1406	if (new != old) {
1407		neigh_del_timer(neigh);
1408		if (new & NUD_PROBE)
1409			atomic_set(&neigh->probes, 0);
1410		if (new & NUD_IN_TIMER)
1411			neigh_add_timer(neigh, (jiffies +
1412						((new & NUD_REACHABLE) ?
1413						 neigh->parms->reachable_time :
1414						 0)));
1415		WRITE_ONCE(neigh->nud_state, new);
1416		notify = 1;
1417	}
1418
1419	if (lladdr != neigh->ha) {
1420		write_seqlock(&neigh->ha_lock);
1421		memcpy(&neigh->ha, lladdr, dev->addr_len);
1422		write_sequnlock(&neigh->ha_lock);
1423		neigh_update_hhs(neigh);
1424		if (!(new & NUD_CONNECTED))
1425			neigh->confirmed = jiffies -
1426				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1427		notify = 1;
1428	}
1429	if (new == old)
1430		goto out;
1431	if (new & NUD_CONNECTED)
1432		neigh_connect(neigh);
1433	else
1434		neigh_suspect(neigh);
1435	if (!(old & NUD_VALID)) {
1436		struct sk_buff *skb;
1437
1438		/* Again: avoid dead loop if something went wrong */
1439
1440		while (neigh->nud_state & NUD_VALID &&
1441		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1442			struct dst_entry *dst = skb_dst(skb);
1443			struct neighbour *n2, *n1 = neigh;
1444			write_unlock_bh(&neigh->lock);
1445
1446			rcu_read_lock();
1447
1448			/* Why not just use 'neigh' as-is?  The problem is that
1449			 * things such as shaper, eql, and sch_teql can end up
1450			 * using alternative, different, neigh objects to output
1451			 * the packet in the output path.  So what we need to do
1452			 * here is re-lookup the top-level neigh in the path so
1453			 * we can reinject the packet there.
1454			 */
1455			n2 = NULL;
1456			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1457				n2 = dst_neigh_lookup_skb(dst, skb);
1458				if (n2)
1459					n1 = n2;
1460			}
1461			READ_ONCE(n1->output)(n1, skb);
1462			if (n2)
1463				neigh_release(n2);
1464			rcu_read_unlock();
1465
1466			write_lock_bh(&neigh->lock);
1467		}
1468		__skb_queue_purge(&neigh->arp_queue);
1469		neigh->arp_queue_len_bytes = 0;
1470	}
1471out:
1472	if (update_isrouter)
1473		neigh_update_is_router(neigh, flags, &notify);
1474	write_unlock_bh(&neigh->lock);
1475	if (((new ^ old) & NUD_PERMANENT) || gc_update)
1476		neigh_update_gc_list(neigh);
1477	if (managed_update)
1478		neigh_update_managed_list(neigh);
1479	if (notify)
1480		neigh_update_notify(neigh, nlmsg_pid);
1481	trace_neigh_update_done(neigh, err);
1482	return err;
1483}
1484
1485int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1486		 u32 flags, u32 nlmsg_pid)
1487{
1488	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1489}
1490EXPORT_SYMBOL(neigh_update);
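/* Usage sketch (illustrative only): calling the exported helper with the
 * flags documented above __neigh_update(), e.g. an administrative override
 * of an entry's link-layer address. addr, dev and new_lladdr are
 * hypothetical locals, not taken from this file:
 *
 *	n = neigh_lookup(tbl, &addr, dev);
 *	if (n) {
 *		neigh_update(n, new_lladdr, NUD_PERMANENT,
 *			     NEIGH_UPDATE_F_OVERRIDE |
 *			     NEIGH_UPDATE_F_ADMIN, 0);
 *		neigh_release(n);
 *	}
 */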
1491
1492/* Update the neigh to listen temporarily for probe responses, even if it is
1493 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1494 */
1495void __neigh_set_probe_once(struct neighbour *neigh)
1496{
1497	if (neigh->dead)
1498		return;
1499	neigh->updated = jiffies;
1500	if (!(neigh->nud_state & NUD_FAILED))
1501		return;
1502	WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1503	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1504	neigh_add_timer(neigh,
1505			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1506				      HZ/100));
1507}
1508EXPORT_SYMBOL(__neigh_set_probe_once);
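/* Usage sketch (illustrative only): as the comment above requires, a caller
 * holds the neighbour lock for writing around the call:
 *
 *	write_lock_bh(&neigh->lock);
 *	__neigh_set_probe_once(neigh);
 *	write_unlock_bh(&neigh->lock);
 */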
1509
1510struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1511				 u8 *lladdr, void *saddr,
1512				 struct net_device *dev)
1513{
1514	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1515						 lladdr || !dev->addr_len);
1516	if (neigh)
1517		neigh_update(neigh, lladdr, NUD_STALE,
1518			     NEIGH_UPDATE_F_OVERRIDE, 0);
1519	return neigh;
1520}
1521EXPORT_SYMBOL(neigh_event_ns);
1522
 1523/* Takes write_lock_bh(&n->lock) to serialize hh_cache initialization. */
1524static void neigh_hh_init(struct neighbour *n)
1525{
1526	struct net_device *dev = n->dev;
1527	__be16 prot = n->tbl->protocol;
1528	struct hh_cache	*hh = &n->hh;
1529
1530	write_lock_bh(&n->lock);
1531
1532	/* Only one thread can come in here and initialize the
1533	 * hh_cache entry.
1534	 */
1535	if (!hh->hh_len)
1536		dev->header_ops->cache(n, hh, prot);
1537
1538	write_unlock_bh(&n->lock);
1539}
1540
1541/* Slow and careful. */
1542
1543int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1544{
1545	int rc = 0;
1546
1547	if (!neigh_event_send(neigh, skb)) {
1548		int err;
1549		struct net_device *dev = neigh->dev;
1550		unsigned int seq;
1551
1552		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1553			neigh_hh_init(neigh);
1554
1555		do {
1556			__skb_pull(skb, skb_network_offset(skb));
1557			seq = read_seqbegin(&neigh->ha_lock);
1558			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1559					      neigh->ha, NULL, skb->len);
1560		} while (read_seqretry(&neigh->ha_lock, seq));
1561
1562		if (err >= 0)
1563			rc = dev_queue_xmit(skb);
1564		else
1565			goto out_kfree_skb;
1566	}
1567out:
1568	return rc;
1569out_kfree_skb:
1570	rc = -EINVAL;
1571	kfree_skb(skb);
1572	goto out;
1573}
1574EXPORT_SYMBOL(neigh_resolve_output);
1575
1576/* As fast as possible without hh cache */
1577
1578int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1579{
1580	struct net_device *dev = neigh->dev;
1581	unsigned int seq;
1582	int err;
1583
1584	do {
1585		__skb_pull(skb, skb_network_offset(skb));
1586		seq = read_seqbegin(&neigh->ha_lock);
1587		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1588				      neigh->ha, NULL, skb->len);
1589	} while (read_seqretry(&neigh->ha_lock, seq));
1590
1591	if (err >= 0)
1592		err = dev_queue_xmit(skb);
1593	else {
1594		err = -EINVAL;
1595		kfree_skb(skb);
1596	}
1597	return err;
1598}
1599EXPORT_SYMBOL(neigh_connected_output);
1600
1601int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1602{
1603	return dev_queue_xmit(skb);
1604}
1605EXPORT_SYMBOL(neigh_direct_output);
1606
1607static void neigh_managed_work(struct work_struct *work)
1608{
1609	struct neigh_table *tbl = container_of(work, struct neigh_table,
1610					       managed_work.work);
1611	struct neighbour *neigh;
1612
1613	write_lock_bh(&tbl->lock);
1614	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1615		neigh_event_send_probe(neigh, NULL, false);
1616	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1617			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1618	write_unlock_bh(&tbl->lock);
1619}
1620
1621static void neigh_proxy_process(struct timer_list *t)
1622{
1623	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1624	long sched_next = 0;
1625	unsigned long now = jiffies;
1626	struct sk_buff *skb, *n;
1627
1628	spin_lock(&tbl->proxy_queue.lock);
1629
1630	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1631		long tdif = NEIGH_CB(skb)->sched_next - now;
1632
1633		if (tdif <= 0) {
1634			struct net_device *dev = skb->dev;
1635
1636			neigh_parms_qlen_dec(dev, tbl->family);
1637			__skb_unlink(skb, &tbl->proxy_queue);
1638
1639			if (tbl->proxy_redo && netif_running(dev)) {
1640				rcu_read_lock();
1641				tbl->proxy_redo(skb);
1642				rcu_read_unlock();
1643			} else {
1644				kfree_skb(skb);
1645			}
1646
1647			dev_put(dev);
1648		} else if (!sched_next || tdif < sched_next)
1649			sched_next = tdif;
1650	}
1651	del_timer(&tbl->proxy_timer);
1652	if (sched_next)
1653		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1654	spin_unlock(&tbl->proxy_queue.lock);
1655}
1656
1657static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1658{
1659	/* If proxy_delay is zero, do not call get_random_u32_below()
1660	 * as it is undefined behavior.
1661	 */
1662	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1663
1664	return proxy_delay ?
1665	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
1666}
1667
1668void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1669		    struct sk_buff *skb)
1670{
1671	unsigned long sched_next = neigh_proxy_delay(p);
 
1672
1673	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1674		kfree_skb(skb);
1675		return;
1676	}
1677
1678	NEIGH_CB(skb)->sched_next = sched_next;
1679	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1680
1681	spin_lock(&tbl->proxy_queue.lock);
1682	if (del_timer(&tbl->proxy_timer)) {
1683		if (time_before(tbl->proxy_timer.expires, sched_next))
1684			sched_next = tbl->proxy_timer.expires;
1685	}
1686	skb_dst_drop(skb);
1687	dev_hold(skb->dev);
1688	__skb_queue_tail(&tbl->proxy_queue, skb);
1689	p->qlen++;
1690	mod_timer(&tbl->proxy_timer, sched_next);
1691	spin_unlock(&tbl->proxy_queue.lock);
1692}
1693EXPORT_SYMBOL(pneigh_enqueue);
1694
1695static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1696						      struct net *net, int ifindex)
1697{
1698	struct neigh_parms *p;
1699
1700	list_for_each_entry(p, &tbl->parms_list, list) {
1701		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1702		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1703			return p;
1704	}
1705
1706	return NULL;
1707}
1708
1709struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1710				      struct neigh_table *tbl)
1711{
1712	struct neigh_parms *p;
1713	struct net *net = dev_net(dev);
1714	const struct net_device_ops *ops = dev->netdev_ops;
1715
1716	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1717	if (p) {
1718		p->tbl		  = tbl;
1719		refcount_set(&p->refcnt, 1);
1720		p->reachable_time =
1721				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1722		p->qlen = 0;
1723		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1724		p->dev = dev;
1725		write_pnet(&p->net, net);
1726		p->sysctl_table = NULL;
1727
1728		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1729			netdev_put(dev, &p->dev_tracker);
1730			kfree(p);
1731			return NULL;
1732		}
1733
1734		write_lock_bh(&tbl->lock);
1735		list_add(&p->list, &tbl->parms.list);
1736		write_unlock_bh(&tbl->lock);
1737
1738		neigh_parms_data_state_cleanall(p);
1739	}
1740	return p;
1741}
1742EXPORT_SYMBOL(neigh_parms_alloc);
1743
1744static void neigh_rcu_free_parms(struct rcu_head *head)
1745{
1746	struct neigh_parms *parms =
1747		container_of(head, struct neigh_parms, rcu_head);
1748
1749	neigh_parms_put(parms);
1750}
1751
1752void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1753{
1754	if (!parms || parms == &tbl->parms)
1755		return;
1756	write_lock_bh(&tbl->lock);
1757	list_del(&parms->list);
1758	parms->dead = 1;
1759	write_unlock_bh(&tbl->lock);
1760	netdev_put(parms->dev, &parms->dev_tracker);
1761	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1762}
1763EXPORT_SYMBOL(neigh_parms_release);
1764
1765static void neigh_parms_destroy(struct neigh_parms *parms)
1766{
1767	kfree(parms);
1768}
1769
1770static struct lock_class_key neigh_table_proxy_queue_class;
1771
1772static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1773
1774void neigh_table_init(int index, struct neigh_table *tbl)
1775{
1776	unsigned long now = jiffies;
1777	unsigned long phsize;
1778
1779	INIT_LIST_HEAD(&tbl->parms_list);
1780	INIT_LIST_HEAD(&tbl->gc_list);
1781	INIT_LIST_HEAD(&tbl->managed_list);
1782
1783	list_add(&tbl->parms.list, &tbl->parms_list);
1784	write_pnet(&tbl->parms.net, &init_net);
1785	refcount_set(&tbl->parms.refcnt, 1);
1786	tbl->parms.reachable_time =
1787			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1788	tbl->parms.qlen = 0;
1789
1790	tbl->stats = alloc_percpu(struct neigh_statistics);
1791	if (!tbl->stats)
1792		panic("cannot create neighbour cache statistics");
1793
1794#ifdef CONFIG_PROC_FS
1795	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1796			      &neigh_stat_seq_ops, tbl))
1797		panic("cannot create neighbour proc dir entry");
1798#endif
1799
1800	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1801
1802	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1803	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1804
1805	if (!tbl->nht || !tbl->phash_buckets)
1806		panic("cannot allocate neighbour cache hashes");
1807
1808	if (!tbl->entry_size)
1809		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1810					tbl->key_len, NEIGH_PRIV_ALIGN);
1811	else
1812		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1813
1814	rwlock_init(&tbl->lock);
1815
1816	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1817	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1818			tbl->parms.reachable_time);
1819	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1820	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1821
1822	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1823	skb_queue_head_init_class(&tbl->proxy_queue,
1824			&neigh_table_proxy_queue_class);
1825
1826	tbl->last_flush = now;
1827	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1828
1829	neigh_tables[index] = tbl;
1830}
1831EXPORT_SYMBOL(neigh_table_init);
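/* Registration sketch (illustrative, loosely modelled on how the ARP code
 * sets up its table; the field values and helper names here are assumptions,
 * not copied from net/ipv4/arp.c):
 *
 *	static struct neigh_table example_tbl = {
 *		.family		= AF_INET,
 *		.key_len	= 4,
 *		.protocol	= cpu_to_be16(ETH_P_IP),
 *		.hash		= example_hash,		// hypothetical
 *		.constructor	= example_constructor,	// hypothetical
 *		.id		= "example_cache",
 *	};
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &example_tbl); // index into neigh_tables[]
 */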
1832
1833int neigh_table_clear(int index, struct neigh_table *tbl)
1834{
1835	neigh_tables[index] = NULL;
 1836	/* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1837	cancel_delayed_work_sync(&tbl->managed_work);
1838	cancel_delayed_work_sync(&tbl->gc_work);
1839	del_timer_sync(&tbl->proxy_timer);
1840	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1841	neigh_ifdown(tbl, NULL);
1842	if (atomic_read(&tbl->entries))
1843		pr_crit("neighbour leakage\n");
1844
1845	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1846		 neigh_hash_free_rcu);
1847	tbl->nht = NULL;
1848
1849	kfree(tbl->phash_buckets);
1850	tbl->phash_buckets = NULL;
1851
1852	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1853
1854	free_percpu(tbl->stats);
1855	tbl->stats = NULL;
1856
1857	return 0;
1858}
1859EXPORT_SYMBOL(neigh_table_clear);
1860
1861static struct neigh_table *neigh_find_table(int family)
1862{
1863	struct neigh_table *tbl = NULL;
1864
1865	switch (family) {
1866	case AF_INET:
1867		tbl = neigh_tables[NEIGH_ARP_TABLE];
1868		break;
1869	case AF_INET6:
1870		tbl = neigh_tables[NEIGH_ND_TABLE];
1871		break;
1872	}
1873
1874	return tbl;
1875}
1876
1877const struct nla_policy nda_policy[NDA_MAX+1] = {
1878	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1879	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1880	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1881	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1882	[NDA_PROBES]		= { .type = NLA_U32 },
1883	[NDA_VLAN]		= { .type = NLA_U16 },
1884	[NDA_PORT]		= { .type = NLA_U16 },
1885	[NDA_VNI]		= { .type = NLA_U32 },
1886	[NDA_IFINDEX]		= { .type = NLA_U32 },
1887	[NDA_MASTER]		= { .type = NLA_U32 },
1888	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1889	[NDA_NH_ID]		= { .type = NLA_U32 },
1890	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1891	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1892};
1893
1894static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1895			struct netlink_ext_ack *extack)
1896{
1897	struct net *net = sock_net(skb->sk);
1898	struct ndmsg *ndm;
1899	struct nlattr *dst_attr;
1900	struct neigh_table *tbl;
1901	struct neighbour *neigh;
1902	struct net_device *dev = NULL;
1903	int err = -EINVAL;
1904
1905	ASSERT_RTNL();
1906	if (nlmsg_len(nlh) < sizeof(*ndm))
1907		goto out;
1908
1909	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1910	if (!dst_attr) {
1911		NL_SET_ERR_MSG(extack, "Network address not specified");
1912		goto out;
1913	}
1914
1915	ndm = nlmsg_data(nlh);
1916	if (ndm->ndm_ifindex) {
1917		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1918		if (dev == NULL) {
1919			err = -ENODEV;
1920			goto out;
1921		}
1922	}
1923
1924	tbl = neigh_find_table(ndm->ndm_family);
1925	if (tbl == NULL)
1926		return -EAFNOSUPPORT;
1927
1928	if (nla_len(dst_attr) < (int)tbl->key_len) {
1929		NL_SET_ERR_MSG(extack, "Invalid network address");
1930		goto out;
1931	}
1932
1933	if (ndm->ndm_flags & NTF_PROXY) {
1934		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1935		goto out;
1936	}
1937
1938	if (dev == NULL)
1939		goto out;
1940
1941	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1942	if (neigh == NULL) {
1943		err = -ENOENT;
1944		goto out;
1945	}
1946
1947	err = __neigh_update(neigh, NULL, NUD_FAILED,
1948			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1949			     NETLINK_CB(skb).portid, extack);
1950	write_lock_bh(&tbl->lock);
1951	neigh_release(neigh);
1952	neigh_remove_one(neigh, tbl);
1953	write_unlock_bh(&tbl->lock);
1954
1955out:
1956	return err;
1957}
1958
1959static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1960		     struct netlink_ext_ack *extack)
1961{
1962	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1963		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1964	struct net *net = sock_net(skb->sk);
1965	struct ndmsg *ndm;
1966	struct nlattr *tb[NDA_MAX+1];
1967	struct neigh_table *tbl;
1968	struct net_device *dev = NULL;
1969	struct neighbour *neigh;
1970	void *dst, *lladdr;
1971	u8 protocol = 0;
1972	u32 ndm_flags;
1973	int err;
1974
1975	ASSERT_RTNL();
1976	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1977				     nda_policy, extack);
1978	if (err < 0)
1979		goto out;
1980
1981	err = -EINVAL;
1982	if (!tb[NDA_DST]) {
1983		NL_SET_ERR_MSG(extack, "Network address not specified");
1984		goto out;
1985	}
1986
1987	ndm = nlmsg_data(nlh);
1988	ndm_flags = ndm->ndm_flags;
1989	if (tb[NDA_FLAGS_EXT]) {
1990		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1991
1992		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1993			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1994			      hweight32(NTF_EXT_MASK)));
1995		ndm_flags |= (ext << NTF_EXT_SHIFT);
1996	}
1997	if (ndm->ndm_ifindex) {
1998		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1999		if (dev == NULL) {
2000			err = -ENODEV;
2001			goto out;
2002		}
2003
2004		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
2005			NL_SET_ERR_MSG(extack, "Invalid link address");
2006			goto out;
2007		}
2008	}
2009
2010	tbl = neigh_find_table(ndm->ndm_family);
2011	if (tbl == NULL)
2012		return -EAFNOSUPPORT;
2013
2014	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2015		NL_SET_ERR_MSG(extack, "Invalid network address");
2016		goto out;
2017	}
2018
2019	dst = nla_data(tb[NDA_DST]);
2020	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
2021
2022	if (tb[NDA_PROTOCOL])
2023		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
2024	if (ndm_flags & NTF_PROXY) {
2025		struct pneigh_entry *pn;
2026
2027		if (ndm_flags & NTF_MANAGED) {
2028			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
2029			goto out;
2030		}
2031
2032		err = -ENOBUFS;
2033		pn = pneigh_lookup(tbl, net, dst, dev, 1);
2034		if (pn) {
2035			pn->flags = ndm_flags;
2036			if (protocol)
2037				pn->protocol = protocol;
2038			err = 0;
2039		}
2040		goto out;
2041	}
2042
2043	if (!dev) {
2044		NL_SET_ERR_MSG(extack, "Device not specified");
2045		goto out;
2046	}
2047
2048	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2049		err = -EINVAL;
2050		goto out;
2051	}
2052
2053	neigh = neigh_lookup(tbl, dst, dev);
2054	if (neigh == NULL) {
2055		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
2056		bool exempt_from_gc = ndm_permanent ||
2057				      ndm_flags & NTF_EXT_LEARNED;
2058
2059		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2060			err = -ENOENT;
2061			goto out;
2062		}
2063		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2064			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2065			err = -EINVAL;
2066			goto out;
2067		}
2068
2069		neigh = ___neigh_create(tbl, dst, dev,
2070					ndm_flags &
2071					(NTF_EXT_LEARNED | NTF_MANAGED),
2072					exempt_from_gc, true);
2073		if (IS_ERR(neigh)) {
2074			err = PTR_ERR(neigh);
2075			goto out;
2076		}
2077	} else {
2078		if (nlh->nlmsg_flags & NLM_F_EXCL) {
2079			err = -EEXIST;
2080			neigh_release(neigh);
2081			goto out;
2082		}
2083
2084		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2085			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2086				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2087	}
2088
2089	if (protocol)
2090		neigh->protocol = protocol;
2091	if (ndm_flags & NTF_EXT_LEARNED)
2092		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2093	if (ndm_flags & NTF_ROUTER)
2094		flags |= NEIGH_UPDATE_F_ISROUTER;
2095	if (ndm_flags & NTF_MANAGED)
2096		flags |= NEIGH_UPDATE_F_MANAGED;
2097	if (ndm_flags & NTF_USE)
2098		flags |= NEIGH_UPDATE_F_USE;
2099
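	/*
	 * All requested NTF_* bits have now been translated into
	 * NEIGH_UPDATE_F_* flags; __neigh_update() applies them together with
	 * the requested state and link-layer address (if any).  For
	 * NTF_USE/NTF_MANAGED entries a resolution attempt is then kicked off
	 * immediately via neigh_event_send().
	 */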
2100	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2101			     NETLINK_CB(skb).portid, extack);
2102	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2103		neigh_event_send(neigh, NULL);
2104		err = 0;
2105	}
2106	neigh_release(neigh);
2107out:
2108	return err;
2109}
2110
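/*
 * Dump one neigh_parms instance as a nested NDTA_PARMS attribute.  The
 * deprecated NDTPA_QUEUE_LEN (in packets) is derived from QUEUE_LEN_BYTES by
 * dividing by the true size of one full-sized Ethernet frame.
 */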
2111static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2112{
2113	struct nlattr *nest;
2114
2115	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2116	if (nest == NULL)
2117		return -ENOBUFS;
2118
2119	if ((parms->dev &&
2120	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2121	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2122	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2123			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2124	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2125	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2126			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2127	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2128	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2129	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2130			NEIGH_VAR(parms, UCAST_PROBES)) ||
2131	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2132			NEIGH_VAR(parms, MCAST_PROBES)) ||
2133	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2134			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2135	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2136			  NDTPA_PAD) ||
2137	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2138			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2139	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2140			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2141	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2142			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2143	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2144			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2145	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2146			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2147	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2148			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2149	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2150			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2151	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2152			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2153		goto nla_put_failure;
2154	return nla_nest_end(skb, nest);
2155
2156nla_put_failure:
2157	nla_nest_cancel(skb, nest);
2158	return -EMSGSIZE;
2159}
2160
2161static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2162			      u32 pid, u32 seq, int type, int flags)
2163{
2164	struct nlmsghdr *nlh;
2165	struct ndtmsg *ndtmsg;
2166
2167	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2168	if (nlh == NULL)
2169		return -EMSGSIZE;
2170
2171	ndtmsg = nlmsg_data(nlh);
2172
2173	read_lock_bh(&tbl->lock);
2174	ndtmsg->ndtm_family = tbl->family;
2175	ndtmsg->ndtm_pad1   = 0;
2176	ndtmsg->ndtm_pad2   = 0;
2177
2178	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2179	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2180			  NDTA_PAD) ||
2181	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2182	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2183	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2184		goto nla_put_failure;
2185	{
2186		unsigned long now = jiffies;
2187		long flush_delta = now - READ_ONCE(tbl->last_flush);
2188		long rand_delta = now - READ_ONCE(tbl->last_rand);
2189		struct neigh_hash_table *nht;
2190		struct ndt_config ndc = {
2191			.ndtc_key_len		= tbl->key_len,
2192			.ndtc_entry_size	= tbl->entry_size,
2193			.ndtc_entries		= atomic_read(&tbl->entries),
2194			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2195			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2196			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
2197		};
2198
2199		rcu_read_lock();
2200		nht = rcu_dereference(tbl->nht);
2201		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2202		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2203		rcu_read_unlock();
2204
2205		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2206			goto nla_put_failure;
2207	}
2208
2209	{
2210		int cpu;
2211		struct ndt_stats ndst;
2212
2213		memset(&ndst, 0, sizeof(ndst));
2214
2215		for_each_possible_cpu(cpu) {
2216			struct neigh_statistics	*st;
2217
2218			st = per_cpu_ptr(tbl->stats, cpu);
2219			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
2220			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
2221			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
2222			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
2223			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
2224			ndst.ndts_hits			+= READ_ONCE(st->hits);
2225			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
2226			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
2227			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
2228			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
2229			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
2230		}
2231
2232		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2233				  NDTA_PAD))
2234			goto nla_put_failure;
2235	}
2236
2237	BUG_ON(tbl->parms.dev);
2238	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2239		goto nla_put_failure;
2240
2241	read_unlock_bh(&tbl->lock);
2242	nlmsg_end(skb, nlh);
2243	return 0;
2244
2245nla_put_failure:
2246	read_unlock_bh(&tbl->lock);
2247	nlmsg_cancel(skb, nlh);
2248	return -EMSGSIZE;
2249}
2250
2251static int neightbl_fill_param_info(struct sk_buff *skb,
2252				    struct neigh_table *tbl,
2253				    struct neigh_parms *parms,
2254				    u32 pid, u32 seq, int type,
2255				    unsigned int flags)
2256{
2257	struct ndtmsg *ndtmsg;
2258	struct nlmsghdr *nlh;
2259
2260	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2261	if (nlh == NULL)
2262		return -EMSGSIZE;
2263
2264	ndtmsg = nlmsg_data(nlh);
2265
2266	read_lock_bh(&tbl->lock);
2267	ndtmsg->ndtm_family = tbl->family;
2268	ndtmsg->ndtm_pad1   = 0;
2269	ndtmsg->ndtm_pad2   = 0;
2270
2271	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2272	    neightbl_fill_parms(skb, parms) < 0)
2273		goto errout;
2274
2275	read_unlock_bh(&tbl->lock);
2276	nlmsg_end(skb, nlh);
2277	return 0;
2278errout:
2279	read_unlock_bh(&tbl->lock);
2280	nlmsg_cancel(skb, nlh);
2281	return -EMSGSIZE;
2282}
2283
2284static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2285	[NDTA_NAME]		= { .type = NLA_STRING },
2286	[NDTA_THRESH1]		= { .type = NLA_U32 },
2287	[NDTA_THRESH2]		= { .type = NLA_U32 },
2288	[NDTA_THRESH3]		= { .type = NLA_U32 },
2289	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2290	[NDTA_PARMS]		= { .type = NLA_NESTED },
2291};
2292
2293static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2294	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2295	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2296	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2297	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2298	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2299	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2300	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2301	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2302	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2303	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2304	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2305	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2306	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2307	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2308	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2309};
2310
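/*
 * RTM_SETNEIGHTBL handler: the table is selected by NDTA_NAME (and family),
 * per-device or default parameters are updated from the nested NDTA_PARMS
 * attribute, and the gc_thresh1..3 and gc_interval knobs may only be changed
 * from the initial namespace.  From userspace this is typically driven by
 * something like "ip ntable change name arp_cache thresh1 1024" (the
 * iproute2 option names here are illustrative, not authoritative).
 */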
2311static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2312			struct netlink_ext_ack *extack)
2313{
2314	struct net *net = sock_net(skb->sk);
2315	struct neigh_table *tbl;
2316	struct ndtmsg *ndtmsg;
2317	struct nlattr *tb[NDTA_MAX+1];
2318	bool found = false;
2319	int err, tidx;
2320
2321	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2322				     nl_neightbl_policy, extack);
2323	if (err < 0)
2324		goto errout;
2325
2326	if (tb[NDTA_NAME] == NULL) {
2327		err = -EINVAL;
2328		goto errout;
2329	}
2330
2331	ndtmsg = nlmsg_data(nlh);
2332
2333	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2334		tbl = neigh_tables[tidx];
2335		if (!tbl)
2336			continue;
2337		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2338			continue;
2339		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2340			found = true;
2341			break;
2342		}
2343	}
2344
2345	if (!found)
2346		return -ENOENT;
2347
2348	/*
2349	 * We acquire tbl->lock to be nice to the periodic timers and
2350	 * make sure they always see a consistent set of values.
2351	 */
2352	write_lock_bh(&tbl->lock);
2353
2354	if (tb[NDTA_PARMS]) {
2355		struct nlattr *tbp[NDTPA_MAX+1];
2356		struct neigh_parms *p;
2357		int i, ifindex = 0;
2358
2359		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2360						  tb[NDTA_PARMS],
2361						  nl_ntbl_parm_policy, extack);
2362		if (err < 0)
2363			goto errout_tbl_lock;
2364
2365		if (tbp[NDTPA_IFINDEX])
2366			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2367
2368		p = lookup_neigh_parms(tbl, net, ifindex);
2369		if (p == NULL) {
2370			err = -ENOENT;
2371			goto errout_tbl_lock;
2372		}
2373
2374		for (i = 1; i <= NDTPA_MAX; i++) {
2375			if (tbp[i] == NULL)
2376				continue;
2377
2378			switch (i) {
2379			case NDTPA_QUEUE_LEN:
2380				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2381					      nla_get_u32(tbp[i]) *
2382					      SKB_TRUESIZE(ETH_FRAME_LEN));
2383				break;
2384			case NDTPA_QUEUE_LENBYTES:
2385				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2386					      nla_get_u32(tbp[i]));
2387				break;
2388			case NDTPA_PROXY_QLEN:
2389				NEIGH_VAR_SET(p, PROXY_QLEN,
2390					      nla_get_u32(tbp[i]));
2391				break;
2392			case NDTPA_APP_PROBES:
2393				NEIGH_VAR_SET(p, APP_PROBES,
2394					      nla_get_u32(tbp[i]));
2395				break;
2396			case NDTPA_UCAST_PROBES:
2397				NEIGH_VAR_SET(p, UCAST_PROBES,
2398					      nla_get_u32(tbp[i]));
2399				break;
2400			case NDTPA_MCAST_PROBES:
2401				NEIGH_VAR_SET(p, MCAST_PROBES,
2402					      nla_get_u32(tbp[i]));
2403				break;
2404			case NDTPA_MCAST_REPROBES:
2405				NEIGH_VAR_SET(p, MCAST_REPROBES,
2406					      nla_get_u32(tbp[i]));
2407				break;
2408			case NDTPA_BASE_REACHABLE_TIME:
2409				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2410					      nla_get_msecs(tbp[i]));
2411				/* update reachable_time as well; otherwise the change will
2412				 * only take effect the next time neigh_periodic_work
2413				 * decides to recompute it (which can take multiple minutes)
2414				 */
2415				p->reachable_time =
2416					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2417				break;
2418			case NDTPA_GC_STALETIME:
2419				NEIGH_VAR_SET(p, GC_STALETIME,
2420					      nla_get_msecs(tbp[i]));
2421				break;
2422			case NDTPA_DELAY_PROBE_TIME:
2423				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2424					      nla_get_msecs(tbp[i]));
2425				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2426				break;
2427			case NDTPA_INTERVAL_PROBE_TIME_MS:
2428				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2429					      nla_get_msecs(tbp[i]));
2430				break;
2431			case NDTPA_RETRANS_TIME:
2432				NEIGH_VAR_SET(p, RETRANS_TIME,
2433					      nla_get_msecs(tbp[i]));
2434				break;
2435			case NDTPA_ANYCAST_DELAY:
2436				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2437					      nla_get_msecs(tbp[i]));
2438				break;
2439			case NDTPA_PROXY_DELAY:
2440				NEIGH_VAR_SET(p, PROXY_DELAY,
2441					      nla_get_msecs(tbp[i]));
2442				break;
2443			case NDTPA_LOCKTIME:
2444				NEIGH_VAR_SET(p, LOCKTIME,
2445					      nla_get_msecs(tbp[i]));
2446				break;
2447			}
2448		}
2449	}
2450
2451	err = -ENOENT;
2452	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2453	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2454	    !net_eq(net, &init_net))
2455		goto errout_tbl_lock;
2456
2457	if (tb[NDTA_THRESH1])
2458		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2459
2460	if (tb[NDTA_THRESH2])
2461		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2462
2463	if (tb[NDTA_THRESH3])
2464		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2465
2466	if (tb[NDTA_GC_INTERVAL])
2467		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2468
2469	err = 0;
2470
2471errout_tbl_lock:
2472	write_unlock_bh(&tbl->lock);
2473errout:
2474	return err;
2475}
2476
2477static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2478				    struct netlink_ext_ack *extack)
2479{
2480	struct ndtmsg *ndtm;
2481
2482	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2483		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2484		return -EINVAL;
2485	}
2486
2487	ndtm = nlmsg_data(nlh);
2488	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2489		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2490		return -EINVAL;
2491	}
2492
2493	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2494		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2495		return -EINVAL;
2496	}
2497
2498	return 0;
2499}
2500
2501static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2502{
2503	const struct nlmsghdr *nlh = cb->nlh;
2504	struct net *net = sock_net(skb->sk);
2505	int family, tidx, nidx = 0;
2506	int tbl_skip = cb->args[0];
2507	int neigh_skip = cb->args[1];
2508	struct neigh_table *tbl;
2509
2510	if (cb->strict_check) {
2511		int err = neightbl_valid_dump_info(nlh, cb->extack);
2512
2513		if (err < 0)
2514			return err;
2515	}
2516
2517	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2518
2519	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2520		struct neigh_parms *p;
2521
2522		tbl = neigh_tables[tidx];
2523		if (!tbl)
2524			continue;
2525
2526		if (tidx < tbl_skip || (family && tbl->family != family))
2527			continue;
2528
2529		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2530				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2531				       NLM_F_MULTI) < 0)
2532			break;
2533
2534		nidx = 0;
2535		p = list_next_entry(&tbl->parms, list);
2536		list_for_each_entry_from(p, &tbl->parms_list, list) {
2537			if (!net_eq(neigh_parms_net(p), net))
2538				continue;
2539
2540			if (nidx < neigh_skip)
2541				goto next;
2542
2543			if (neightbl_fill_param_info(skb, tbl, p,
2544						     NETLINK_CB(cb->skb).portid,
2545						     nlh->nlmsg_seq,
2546						     RTM_NEWNEIGHTBL,
2547						     NLM_F_MULTI) < 0)
2548				goto out;
2549		next:
2550			nidx++;
2551		}
2552
2553		neigh_skip = 0;
2554	}
2555out:
2556	cb->args[0] = tidx;
2557	cb->args[1] = nidx;
2558
2559	return skb->len;
2560}
2561
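/*
 * Build one RTM_NEWNEIGH message: an ndmsg header followed by NDA_DST, the
 * link-layer address (only while the entry is NUD_VALID), NDA_CACHEINFO
 * timestamps, the probe counter and, when set, NDA_PROTOCOL and
 * NDA_FLAGS_EXT.
 */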
2562static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2563			   u32 pid, u32 seq, int type, unsigned int flags)
2564{
2565	u32 neigh_flags, neigh_flags_ext;
2566	unsigned long now = jiffies;
2567	struct nda_cacheinfo ci;
2568	struct nlmsghdr *nlh;
2569	struct ndmsg *ndm;
2570
2571	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2572	if (nlh == NULL)
2573		return -EMSGSIZE;
2574
2575	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2576	neigh_flags     = neigh->flags & NTF_OLD_MASK;
2577
2578	ndm = nlmsg_data(nlh);
2579	ndm->ndm_family	 = neigh->ops->family;
2580	ndm->ndm_pad1    = 0;
2581	ndm->ndm_pad2    = 0;
2582	ndm->ndm_flags	 = neigh_flags;
2583	ndm->ndm_type	 = neigh->type;
2584	ndm->ndm_ifindex = neigh->dev->ifindex;
2585
2586	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2587		goto nla_put_failure;
2588
2589	read_lock_bh(&neigh->lock);
2590	ndm->ndm_state	 = neigh->nud_state;
2591	if (neigh->nud_state & NUD_VALID) {
2592		char haddr[MAX_ADDR_LEN];
2593
2594		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2595		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2596			read_unlock_bh(&neigh->lock);
2597			goto nla_put_failure;
2598		}
2599	}
2600
2601	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2602	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2603	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2604	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2605	read_unlock_bh(&neigh->lock);
2606
2607	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2608	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2609		goto nla_put_failure;
2610
2611	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2612		goto nla_put_failure;
2613	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2614		goto nla_put_failure;
2615
2616	nlmsg_end(skb, nlh);
2617	return 0;
2618
2619nla_put_failure:
2620	nlmsg_cancel(skb, nlh);
2621	return -EMSGSIZE;
2622}
2623
2624static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2625			    u32 pid, u32 seq, int type, unsigned int flags,
2626			    struct neigh_table *tbl)
2627{
2628	u32 neigh_flags, neigh_flags_ext;
2629	struct nlmsghdr *nlh;
2630	struct ndmsg *ndm;
2631
2632	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2633	if (nlh == NULL)
2634		return -EMSGSIZE;
2635
2636	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2637	neigh_flags     = pn->flags & NTF_OLD_MASK;
2638
2639	ndm = nlmsg_data(nlh);
2640	ndm->ndm_family	 = tbl->family;
2641	ndm->ndm_pad1    = 0;
2642	ndm->ndm_pad2    = 0;
2643	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2644	ndm->ndm_type	 = RTN_UNICAST;
2645	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2646	ndm->ndm_state	 = NUD_NONE;
2647
2648	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2649		goto nla_put_failure;
2650
2651	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2652		goto nla_put_failure;
2653	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2654		goto nla_put_failure;
2655
2656	nlmsg_end(skb, nlh);
2657	return 0;
2658
2659nla_put_failure:
2660	nlmsg_cancel(skb, nlh);
2661	return -EMSGSIZE;
2662}
2663
2664static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2665{
2666	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2667	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2668}
2669
2670static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2671{
2672	struct net_device *master;
2673
2674	if (!master_idx)
2675		return false;
2676
2677	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2678
2679	/* 0 already denotes that NDA_MASTER was not passed, so another
2680	 * otherwise-invalid ifindex value (-1) is used to mean "no master".
2681	 */
2682	if (master_idx == -1)
2683		return !!master;
2684
2685	if (!master || master->ifindex != master_idx)
2686		return true;
2687
2688	return false;
2689}
2690
2691static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2692{
2693	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2694		return true;
2695
2696	return false;
2697}
2698
2699struct neigh_dump_filter {
2700	int master_idx;
2701	int dev_idx;
2702};
2703
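/*
 * Dump walk for regular entries: buckets are traversed under RCU, and the
 * current bucket and index are stored in cb->args[1] and cb->args[2] so a
 * partially filled skb can be resumed on the next dump call.  Proxy entries
 * use the separate pneigh walk below with cb->args[3] and cb->args[4].
 */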
2704static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2705			    struct netlink_callback *cb,
2706			    struct neigh_dump_filter *filter)
2707{
2708	struct net *net = sock_net(skb->sk);
2709	struct neighbour *n;
2710	int rc, h, s_h = cb->args[1];
2711	int idx, s_idx = idx = cb->args[2];
2712	struct neigh_hash_table *nht;
2713	unsigned int flags = NLM_F_MULTI;
2714
2715	if (filter->dev_idx || filter->master_idx)
2716		flags |= NLM_F_DUMP_FILTERED;
2717
2718	rcu_read_lock();
2719	nht = rcu_dereference(tbl->nht);
2720
2721	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2722		if (h > s_h)
2723			s_idx = 0;
2724		for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0;
2725		     n != NULL;
2726		     n = rcu_dereference(n->next)) {
2727			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2728				goto next;
2729			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2730			    neigh_master_filtered(n->dev, filter->master_idx))
2731				goto next;
2732			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2733					    cb->nlh->nlmsg_seq,
2734					    RTM_NEWNEIGH,
2735					    flags) < 0) {
2736				rc = -1;
2737				goto out;
2738			}
2739next:
2740			idx++;
2741		}
2742	}
2743	rc = skb->len;
2744out:
2745	rcu_read_unlock();
2746	cb->args[1] = h;
2747	cb->args[2] = idx;
2748	return rc;
2749}
2750
2751static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2752			     struct netlink_callback *cb,
2753			     struct neigh_dump_filter *filter)
2754{
2755	struct pneigh_entry *n;
2756	struct net *net = sock_net(skb->sk);
2757	int rc, h, s_h = cb->args[3];
2758	int idx, s_idx = idx = cb->args[4];
2759	unsigned int flags = NLM_F_MULTI;
2760
2761	if (filter->dev_idx || filter->master_idx)
2762		flags |= NLM_F_DUMP_FILTERED;
2763
2764	read_lock_bh(&tbl->lock);
2765
2766	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2767		if (h > s_h)
2768			s_idx = 0;
2769		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2770			if (idx < s_idx || pneigh_net(n) != net)
2771				goto next;
2772			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2773			    neigh_master_filtered(n->dev, filter->master_idx))
2774				goto next;
2775			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2776					    cb->nlh->nlmsg_seq,
2777					    RTM_NEWNEIGH, flags, tbl) < 0) {
2778				read_unlock_bh(&tbl->lock);
2779				rc = -1;
2780				goto out;
2781			}
2782		next:
2783			idx++;
2784		}
2785	}
2786
2787	read_unlock_bh(&tbl->lock);
2788	rc = skb->len;
2789out:
2790	cb->args[3] = h;
2791	cb->args[4] = idx;
2792	return rc;
2793
2794}
2795
2796static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2797				bool strict_check,
2798				struct neigh_dump_filter *filter,
2799				struct netlink_ext_ack *extack)
2800{
2801	struct nlattr *tb[NDA_MAX + 1];
2802	int err, i;
2803
2804	if (strict_check) {
2805		struct ndmsg *ndm;
2806
2807		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2808			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2809			return -EINVAL;
2810		}
2811
2812		ndm = nlmsg_data(nlh);
2813		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2814		    ndm->ndm_state || ndm->ndm_type) {
2815			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2816			return -EINVAL;
2817		}
2818
2819		if (ndm->ndm_flags & ~NTF_PROXY) {
2820			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2821			return -EINVAL;
2822		}
2823
2824		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2825						    tb, NDA_MAX, nda_policy,
2826						    extack);
2827	} else {
2828		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2829					     NDA_MAX, nda_policy, extack);
2830	}
2831	if (err < 0)
2832		return err;
2833
2834	for (i = 0; i <= NDA_MAX; ++i) {
2835		if (!tb[i])
2836			continue;
2837
2838		/* all new attributes should require strict_check */
2839		switch (i) {
2840		case NDA_IFINDEX:
2841			filter->dev_idx = nla_get_u32(tb[i]);
2842			break;
2843		case NDA_MASTER:
2844			filter->master_idx = nla_get_u32(tb[i]);
2845			break;
2846		default:
2847			if (strict_check) {
2848				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2849				return -EINVAL;
2850			}
2851		}
2852	}
2853
2854	return 0;
2855}
2856
2857static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2858{
2859	const struct nlmsghdr *nlh = cb->nlh;
2860	struct neigh_dump_filter filter = {};
2861	struct neigh_table *tbl;
2862	int t, family, s_t;
2863	int proxy = 0;
2864	int err;
2865
2866	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2867
2868	/* check for presence of a full ndmsg structure; the family member
2869	 * sits at the same offset in both structures
2870	 */
2871	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2872	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2873		proxy = 1;
2874
2875	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2876	if (err < 0 && cb->strict_check)
2877		return err;
2878
2879	s_t = cb->args[0];
2880
2881	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2882		tbl = neigh_tables[t];
2883
2884		if (!tbl)
2885			continue;
2886		if (t < s_t || (family && tbl->family != family))
2887			continue;
2888		if (t > s_t)
2889			memset(&cb->args[1], 0, sizeof(cb->args) -
2890						sizeof(cb->args[0]));
2891		if (proxy)
2892			err = pneigh_dump_table(tbl, skb, cb, &filter);
2893		else
2894			err = neigh_dump_table(tbl, skb, cb, &filter);
2895		if (err < 0)
2896			break;
2897	}
2898
2899	cb->args[0] = t;
2900	return skb->len;
2901}
2902
2903static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2904			       struct neigh_table **tbl,
2905			       void **dst, int *dev_idx, u8 *ndm_flags,
2906			       struct netlink_ext_ack *extack)
2907{
2908	struct nlattr *tb[NDA_MAX + 1];
2909	struct ndmsg *ndm;
2910	int err, i;
2911
2912	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2913		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2914		return -EINVAL;
2915	}
2916
2917	ndm = nlmsg_data(nlh);
2918	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2919	    ndm->ndm_type) {
2920		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2921		return -EINVAL;
2922	}
2923
2924	if (ndm->ndm_flags & ~NTF_PROXY) {
2925		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2926		return -EINVAL;
2927	}
2928
2929	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2930					    NDA_MAX, nda_policy, extack);
2931	if (err < 0)
2932		return err;
2933
2934	*ndm_flags = ndm->ndm_flags;
2935	*dev_idx = ndm->ndm_ifindex;
2936	*tbl = neigh_find_table(ndm->ndm_family);
2937	if (*tbl == NULL) {
2938		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2939		return -EAFNOSUPPORT;
2940	}
2941
2942	for (i = 0; i <= NDA_MAX; ++i) {
2943		if (!tb[i])
2944			continue;
2945
2946		switch (i) {
2947		case NDA_DST:
2948			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2949				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2950				return -EINVAL;
2951			}
2952			*dst = nla_data(tb[i]);
2953			break;
2954		default:
2955			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2956			return -EINVAL;
2957		}
2958	}
2959
2960	return 0;
2961}
2962
2963static inline size_t neigh_nlmsg_size(void)
2964{
2965	return NLMSG_ALIGN(sizeof(struct ndmsg))
2966	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2967	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2968	       + nla_total_size(sizeof(struct nda_cacheinfo))
2969	       + nla_total_size(4)  /* NDA_PROBES */
2970	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2971	       + nla_total_size(1); /* NDA_PROTOCOL */
2972}
2973
2974static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2975			   u32 pid, u32 seq)
2976{
2977	struct sk_buff *skb;
2978	int err = 0;
2979
2980	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2981	if (!skb)
2982		return -ENOBUFS;
2983
2984	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2985	if (err) {
2986		kfree_skb(skb);
2987		goto errout;
2988	}
2989
2990	err = rtnl_unicast(skb, net, pid);
2991errout:
2992	return err;
2993}
2994
2995static inline size_t pneigh_nlmsg_size(void)
2996{
2997	return NLMSG_ALIGN(sizeof(struct ndmsg))
2998	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2999	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
3000	       + nla_total_size(1); /* NDA_PROTOCOL */
3001}
3002
3003static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
3004			    u32 pid, u32 seq, struct neigh_table *tbl)
3005{
3006	struct sk_buff *skb;
3007	int err = 0;
3008
3009	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
3010	if (!skb)
3011		return -ENOBUFS;
3012
3013	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3014	if (err) {
3015		kfree_skb(skb);
3016		goto errout;
3017	}
3018
3019	err = rtnl_unicast(skb, net, pid);
3020errout:
3021	return err;
3022}
3023
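/*
 * RTM_GETNEIGH single-entry lookup (e.g. "ip neigh get 192.0.2.1 dev eth0",
 * address and device name purely illustrative): a proxy lookup is done when
 * NTF_PROXY is set, otherwise the ordinary cache is searched and the result
 * is unicast back to the requester.
 */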
3024static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3025		     struct netlink_ext_ack *extack)
3026{
3027	struct net *net = sock_net(in_skb->sk);
3028	struct net_device *dev = NULL;
3029	struct neigh_table *tbl = NULL;
3030	struct neighbour *neigh;
3031	void *dst = NULL;
3032	u8 ndm_flags = 0;
3033	int dev_idx = 0;
3034	int err;
3035
3036	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3037				  extack);
3038	if (err < 0)
3039		return err;
3040
3041	if (dev_idx) {
3042		dev = __dev_get_by_index(net, dev_idx);
3043		if (!dev) {
3044			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3045			return -ENODEV;
3046		}
3047	}
3048
3049	if (!dst) {
3050		NL_SET_ERR_MSG(extack, "Network address not specified");
3051		return -EINVAL;
3052	}
3053
3054	if (ndm_flags & NTF_PROXY) {
3055		struct pneigh_entry *pn;
3056
3057		pn = pneigh_lookup(tbl, net, dst, dev, 0);
3058		if (!pn) {
3059			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3060			return -ENOENT;
3061		}
3062		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3063					nlh->nlmsg_seq, tbl);
3064	}
3065
3066	if (!dev) {
3067		NL_SET_ERR_MSG(extack, "No device specified");
3068		return -EINVAL;
3069	}
3070
3071	neigh = neigh_lookup(tbl, dst, dev);
3072	if (!neigh) {
3073		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3074		return -ENOENT;
3075	}
3076
3077	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3078			      nlh->nlmsg_seq);
3079
3080	neigh_release(neigh);
3081
3082	return err;
3083}
3084
3085void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3086{
3087	int chain;
3088	struct neigh_hash_table *nht;
3089
3090	rcu_read_lock();
3091	nht = rcu_dereference(tbl->nht);
3092
3093	read_lock_bh(&tbl->lock); /* avoid resizes */
3094	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3095		struct neighbour *n;
3096
3097		for (n = rcu_dereference(nht->hash_buckets[chain]);
3098		     n != NULL;
3099		     n = rcu_dereference(n->next))
3100			cb(n, cookie);
3101	}
3102	read_unlock_bh(&tbl->lock);
3103	rcu_read_unlock();
3104}
3105EXPORT_SYMBOL(neigh_for_each);
3106
3107/* The tbl->lock must be held as a writer and BH disabled. */
3108void __neigh_for_each_release(struct neigh_table *tbl,
3109			      int (*cb)(struct neighbour *))
3110{
3111	int chain;
3112	struct neigh_hash_table *nht;
3113
3114	nht = rcu_dereference_protected(tbl->nht,
3115					lockdep_is_held(&tbl->lock));
3116	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3117		struct neighbour *n;
3118		struct neighbour __rcu **np;
3119
3120		np = &nht->hash_buckets[chain];
3121		while ((n = rcu_dereference_protected(*np,
3122					lockdep_is_held(&tbl->lock))) != NULL) {
3123			int release;
3124
3125			write_lock(&n->lock);
3126			release = cb(n);
3127			if (release) {
3128				rcu_assign_pointer(*np,
3129					rcu_dereference_protected(n->next,
3130						lockdep_is_held(&tbl->lock)));
3131				neigh_mark_dead(n);
3132			} else
3133				np = &n->next;
3134			write_unlock(&n->lock);
3135			if (release)
3136				neigh_cleanup_and_release(n);
3137		}
3138	}
3139}
3140EXPORT_SYMBOL(__neigh_for_each_release);
3141
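/*
 * Transmit helper for callers that only have a table index and a destination
 * address: for the ARP/ND tables an entry is looked up (or created) and its
 * output function invoked; NEIGH_LINK_TABLE bypasses the cache and builds
 * the hard header directly before queueing the skb.
 */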
3142int neigh_xmit(int index, struct net_device *dev,
3143	       const void *addr, struct sk_buff *skb)
3144{
3145	int err = -EAFNOSUPPORT;
3146	if (likely(index < NEIGH_NR_TABLES)) {
3147		struct neigh_table *tbl;
3148		struct neighbour *neigh;
3149
3150		tbl = neigh_tables[index];
3151		if (!tbl)
3152			goto out;
3153		rcu_read_lock();
3154		if (index == NEIGH_ARP_TABLE) {
3155			u32 key = *((u32 *)addr);
3156
3157			neigh = __ipv4_neigh_lookup_noref(dev, key);
3158		} else {
3159			neigh = __neigh_lookup_noref(tbl, addr, dev);
3160		}
3161		if (!neigh)
3162			neigh = __neigh_create(tbl, addr, dev, false);
3163		err = PTR_ERR(neigh);
3164		if (IS_ERR(neigh)) {
3165			rcu_read_unlock();
3166			goto out_kfree_skb;
3167		}
3168		err = READ_ONCE(neigh->output)(neigh, skb);
3169		rcu_read_unlock();
3170	}
3171	else if (index == NEIGH_LINK_TABLE) {
3172		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3173				      addr, NULL, skb->len);
3174		if (err < 0)
3175			goto out_kfree_skb;
3176		err = dev_queue_xmit(skb);
3177	}
3178out:
3179	return err;
3180out_kfree_skb:
3181	kfree_skb(skb);
3182	goto out;
3183}
3184EXPORT_SYMBOL(neigh_xmit);
3185
3186#ifdef CONFIG_PROC_FS
3187
3188static struct neighbour *neigh_get_first(struct seq_file *seq)
3189{
3190	struct neigh_seq_state *state = seq->private;
3191	struct net *net = seq_file_net(seq);
3192	struct neigh_hash_table *nht = state->nht;
3193	struct neighbour *n = NULL;
3194	int bucket;
3195
3196	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3197	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3198		n = rcu_dereference(nht->hash_buckets[bucket]);
3199
3200		while (n) {
3201			if (!net_eq(dev_net(n->dev), net))
3202				goto next;
3203			if (state->neigh_sub_iter) {
3204				loff_t fakep = 0;
3205				void *v;
3206
3207				v = state->neigh_sub_iter(state, n, &fakep);
3208				if (!v)
3209					goto next;
3210			}
3211			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3212				break;
3213			if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3214				break;
3215next:
3216			n = rcu_dereference(n->next);
3217		}
3218
3219		if (n)
3220			break;
3221	}
3222	state->bucket = bucket;
3223
3224	return n;
3225}
3226
3227static struct neighbour *neigh_get_next(struct seq_file *seq,
3228					struct neighbour *n,
3229					loff_t *pos)
3230{
3231	struct neigh_seq_state *state = seq->private;
3232	struct net *net = seq_file_net(seq);
3233	struct neigh_hash_table *nht = state->nht;
3234
3235	if (state->neigh_sub_iter) {
3236		void *v = state->neigh_sub_iter(state, n, pos);
3237		if (v)
3238			return n;
3239	}
3240	n = rcu_dereference(n->next);
3241
3242	while (1) {
3243		while (n) {
3244			if (!net_eq(dev_net(n->dev), net))
3245				goto next;
3246			if (state->neigh_sub_iter) {
3247				void *v = state->neigh_sub_iter(state, n, pos);
3248				if (v)
3249					return n;
3250				goto next;
3251			}
3252			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3253				break;
3254
3255			if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3256				break;
3257next:
3258			n = rcu_dereference(n->next);
3259		}
3260
3261		if (n)
3262			break;
3263
3264		if (++state->bucket >= (1 << nht->hash_shift))
3265			break;
3266
3267		n = rcu_dereference(nht->hash_buckets[state->bucket]);
3268	}
3269
3270	if (n && pos)
3271		--(*pos);
3272	return n;
3273}
3274
3275static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3276{
3277	struct neighbour *n = neigh_get_first(seq);
3278
3279	if (n) {
3280		--(*pos);
3281		while (*pos) {
3282			n = neigh_get_next(seq, n, pos);
3283			if (!n)
3284				break;
3285		}
3286	}
3287	return *pos ? NULL : n;
3288}
3289
3290static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3291{
3292	struct neigh_seq_state *state = seq->private;
3293	struct net *net = seq_file_net(seq);
3294	struct neigh_table *tbl = state->tbl;
3295	struct pneigh_entry *pn = NULL;
3296	int bucket;
3297
3298	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3299	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3300		pn = tbl->phash_buckets[bucket];
3301		while (pn && !net_eq(pneigh_net(pn), net))
3302			pn = pn->next;
3303		if (pn)
3304			break;
3305	}
3306	state->bucket = bucket;
3307
3308	return pn;
3309}
3310
3311static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3312					    struct pneigh_entry *pn,
3313					    loff_t *pos)
3314{
3315	struct neigh_seq_state *state = seq->private;
3316	struct net *net = seq_file_net(seq);
3317	struct neigh_table *tbl = state->tbl;
3318
3319	do {
3320		pn = pn->next;
3321	} while (pn && !net_eq(pneigh_net(pn), net));
3322
3323	while (!pn) {
3324		if (++state->bucket > PNEIGH_HASHMASK)
3325			break;
3326		pn = tbl->phash_buckets[state->bucket];
3327		while (pn && !net_eq(pneigh_net(pn), net))
3328			pn = pn->next;
3329		if (pn)
3330			break;
3331	}
3332
3333	if (pn && pos)
3334		--(*pos);
3335
3336	return pn;
3337}
3338
3339static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3340{
3341	struct pneigh_entry *pn = pneigh_get_first(seq);
3342
3343	if (pn) {
3344		--(*pos);
3345		while (*pos) {
3346			pn = pneigh_get_next(seq, pn, pos);
3347			if (!pn)
3348				break;
3349		}
3350	}
3351	return *pos ? NULL : pn;
3352}
3353
3354static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3355{
3356	struct neigh_seq_state *state = seq->private;
3357	void *rc;
3358	loff_t idxpos = *pos;
3359
3360	rc = neigh_get_idx(seq, &idxpos);
3361	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3362		rc = pneigh_get_idx(seq, &idxpos);
3363
3364	return rc;
3365}
3366
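/*
 * seq_file iteration helper used by protocol /proc files such as
 * /proc/net/arp: regular entries are walked first, then (unless
 * NEIGH_SEQ_NEIGH_ONLY) the proxy hash table, with NEIGH_SEQ_IS_PNEIGH
 * tracking which phase the iterator is in.
 */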
3367void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3368	__acquires(tbl->lock)
3369	__acquires(rcu)
3370{
3371	struct neigh_seq_state *state = seq->private;
3372
3373	state->tbl = tbl;
3374	state->bucket = 0;
3375	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3376
3377	rcu_read_lock();
3378	state->nht = rcu_dereference(tbl->nht);
3379	read_lock_bh(&tbl->lock);
3380
3381	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3382}
3383EXPORT_SYMBOL(neigh_seq_start);
3384
3385void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3386{
3387	struct neigh_seq_state *state;
3388	void *rc;
3389
3390	if (v == SEQ_START_TOKEN) {
3391		rc = neigh_get_first(seq);
3392		goto out;
3393	}
3394
3395	state = seq->private;
3396	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3397		rc = neigh_get_next(seq, v, NULL);
3398		if (rc)
3399			goto out;
3400		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3401			rc = pneigh_get_first(seq);
3402	} else {
3403		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3404		rc = pneigh_get_next(seq, v, NULL);
3405	}
3406out:
3407	++(*pos);
3408	return rc;
3409}
3410EXPORT_SYMBOL(neigh_seq_next);
3411
3412void neigh_seq_stop(struct seq_file *seq, void *v)
3413	__releases(tbl->lock)
3414	__releases(rcu)
3415{
3416	struct neigh_seq_state *state = seq->private;
3417	struct neigh_table *tbl = state->tbl;
3418
3419	read_unlock_bh(&tbl->lock);
3420	rcu_read_unlock();
3421}
3422EXPORT_SYMBOL(neigh_seq_stop);
3423
3424/* statistics via seq_file */
3425
3426static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3427{
3428	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3429	int cpu;
3430
3431	if (*pos == 0)
3432		return SEQ_START_TOKEN;
3433
3434	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3435		if (!cpu_possible(cpu))
3436			continue;
3437		*pos = cpu+1;
3438		return per_cpu_ptr(tbl->stats, cpu);
3439	}
3440	return NULL;
3441}
3442
3443static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3444{
3445	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3446	int cpu;
3447
3448	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3449		if (!cpu_possible(cpu))
3450			continue;
3451		*pos = cpu+1;
3452		return per_cpu_ptr(tbl->stats, cpu);
3453	}
3454	(*pos)++;
3455	return NULL;
3456}
3457
3458static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3459{
3460
3461}
3462
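/*
 * One row of per-CPU statistics, printed in hexadecimal; the table-wide
 * entry count is repeated on every row.  Readable via
 * /proc/net/stat/arp_cache and /proc/net/stat/ndisc_cache.
 */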
3463static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3464{
3465	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3466	struct neigh_statistics *st = v;
3467
3468	if (v == SEQ_START_TOKEN) {
3469		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3470		return 0;
3471	}
3472
3473	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3474			"%08lx         %08lx         %08lx         "
3475			"%08lx       %08lx            %08lx\n",
3476		   atomic_read(&tbl->entries),
3477
3478		   st->allocs,
3479		   st->destroys,
3480		   st->hash_grows,
3481
3482		   st->lookups,
3483		   st->hits,
3484
3485		   st->res_failed,
3486
3487		   st->rcv_probes_mcast,
3488		   st->rcv_probes_ucast,
3489
3490		   st->periodic_gc_runs,
3491		   st->forced_gc_runs,
3492		   st->unres_discards,
3493		   st->table_fulls
3494		   );
3495
3496	return 0;
3497}
3498
3499static const struct seq_operations neigh_stat_seq_ops = {
3500	.start	= neigh_stat_seq_start,
3501	.next	= neigh_stat_seq_next,
3502	.stop	= neigh_stat_seq_stop,
3503	.show	= neigh_stat_seq_show,
3504};
3505#endif /* CONFIG_PROC_FS */
3506
3507static void __neigh_notify(struct neighbour *n, int type, int flags,
3508			   u32 pid)
3509{
3510	struct net *net = dev_net(n->dev);
3511	struct sk_buff *skb;
3512	int err = -ENOBUFS;
3513
3514	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3515	if (skb == NULL)
3516		goto errout;
3517
3518	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3519	if (err < 0) {
3520		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3521		WARN_ON(err == -EMSGSIZE);
3522		kfree_skb(skb);
3523		goto errout;
3524	}
3525	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3526	return;
3527errout:
3528	if (err < 0)
3529		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3530}
3531
3532void neigh_app_ns(struct neighbour *n)
3533{
3534	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3535}
3536EXPORT_SYMBOL(neigh_app_ns);
3537
3538#ifdef CONFIG_SYSCTL
3539static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3540
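/*
 * The legacy unres_qlen sysctl is expressed in packets while the kernel
 * accounts the queue in bytes, so reads and writes are converted using the
 * true size of one maximal Ethernet frame, SKB_TRUESIZE(ETH_FRAME_LEN)
 * (on the order of 2 KB per packet, depending on the architecture).
 */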
3541static int proc_unres_qlen(struct ctl_table *ctl, int write,
3542			   void *buffer, size_t *lenp, loff_t *ppos)
3543{
3544	int size, ret;
3545	struct ctl_table tmp = *ctl;
3546
3547	tmp.extra1 = SYSCTL_ZERO;
3548	tmp.extra2 = &unres_qlen_max;
3549	tmp.data = &size;
3550
3551	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3552	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3553
3554	if (write && !ret)
3555		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3556	return ret;
3557}
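
/*
 * Sketch of the per-device parms lookup assumed by neigh_copy_dflt_parms()
 * below; it resolves the device's ARP or NDISC neigh_parms via the
 * inetdevice/addrconf RCU accessors.
 */
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}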
3558
3559static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3560				  int index)
3561{
3562	struct net_device *dev;
3563	int family = neigh_parms_family(p);
3564
3565	rcu_read_lock();
3566	for_each_netdev_rcu(net, dev) {
3567		struct neigh_parms *dst_p =
3568				neigh_get_dev_parms_rcu(dev, family);
3569
3570		if (dst_p && !test_bit(index, dst_p->data_state))
3571			dst_p->data[index] = p->data[index];
3572	}
3573	rcu_read_unlock();
3574}
3575
3576static void neigh_proc_update(struct ctl_table *ctl, int write)
3577{
3578	struct net_device *dev = ctl->extra1;
3579	struct neigh_parms *p = ctl->extra2;
3580	struct net *net = neigh_parms_net(p);
3581	int index = (int *) ctl->data - p->data;
3582
3583	if (!write)
3584		return;
3585
3586	set_bit(index, p->data_state);
3587	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3588		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3589	if (!dev) /* NULL dev means this is default value */
3590		neigh_copy_dflt_parms(net, p, index);
3591}
3592
3593static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3594					   void *buffer, size_t *lenp,
3595					   loff_t *ppos)
3596{
3597	struct ctl_table tmp = *ctl;
3598	int ret;
3599
3600	tmp.extra1 = SYSCTL_ZERO;
3601	tmp.extra2 = SYSCTL_INT_MAX;
3602
3603	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3604	neigh_proc_update(ctl, write);
3605	return ret;
3606}
3607
3608static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
3609						   void *buffer, size_t *lenp, loff_t *ppos)
3610{
3611	struct ctl_table tmp = *ctl;
3612	int ret;
3613
3614	int min = msecs_to_jiffies(1);
3615
3616	tmp.extra1 = &min;
3617	tmp.extra2 = NULL;
3618
3619	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3620	neigh_proc_update(ctl, write);
3621	return ret;
3622}
3623
3624int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3625			size_t *lenp, loff_t *ppos)
3626{
3627	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3628
3629	neigh_proc_update(ctl, write);
3630	return ret;
3631}
3632EXPORT_SYMBOL(neigh_proc_dointvec);
3633
3634int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3635				size_t *lenp, loff_t *ppos)
3636{
3637	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3638
3639	neigh_proc_update(ctl, write);
3640	return ret;
3641}
3642EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3643
3644static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3645					      void *buffer, size_t *lenp,
3646					      loff_t *ppos)
3647{
3648	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3649
3650	neigh_proc_update(ctl, write);
3651	return ret;
3652}
3653
3654int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3655				   void *buffer, size_t *lenp, loff_t *ppos)
3656{
3657	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3658
3659	neigh_proc_update(ctl, write);
3660	return ret;
3661}
3662EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3663
3664static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3665					  void *buffer, size_t *lenp,
3666					  loff_t *ppos)
3667{
3668	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3669
3670	neigh_proc_update(ctl, write);
3671	return ret;
3672}
3673
3674static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3675					  void *buffer, size_t *lenp,
3676					  loff_t *ppos)
3677{
3678	struct neigh_parms *p = ctl->extra2;
3679	int ret;
3680
3681	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3682		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3683	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3684		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3685	else
3686		ret = -1;
3687
3688	if (write && ret == 0) {
3689		/* update reachable_time as well; otherwise the change will
3690		 * only take effect the next time neigh_periodic_work
3691		 * decides to recompute it
3692		 */
3693		p->reachable_time =
3694			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3695	}
3696	return ret;
3697}
3698
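/*
 * The sysctl template below cannot know the address of a particular
 * neigh_parms instance, so each entry initially stores only the offset of
 * its data[] slot (taken from a NULL base pointer); neigh_sysctl_register()
 * later rebases it by adding the real parms pointer.
 */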
3699#define NEIGH_PARMS_DATA_OFFSET(index)	\
3700	(&((struct neigh_parms *) 0)->data[index])
3701
3702#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3703	[NEIGH_VAR_ ## attr] = { \
3704		.procname	= name, \
3705		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3706		.maxlen		= sizeof(int), \
3707		.mode		= mval, \
3708		.proc_handler	= proc, \
3709	}
3710
3711#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3712	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3713
3714#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3715	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3716
3717#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3718	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3719
3720#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3721	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3722
3723#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3724	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3725
3726#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3727	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3728
3729static struct neigh_sysctl_table {
3730	struct ctl_table_header *sysctl_header;
3731	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3732} neigh_sysctl_template __read_mostly = {
3733	.neigh_vars = {
3734		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3735		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3736		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3737		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3738		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3739		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3740		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3741		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3742						       "interval_probe_time_ms"),
3743		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3744		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3745		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3746		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3747		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3748		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3749		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3750		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3751		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3752		[NEIGH_VAR_GC_INTERVAL] = {
3753			.procname	= "gc_interval",
3754			.maxlen		= sizeof(int),
3755			.mode		= 0644,
3756			.proc_handler	= proc_dointvec_jiffies,
3757		},
3758		[NEIGH_VAR_GC_THRESH1] = {
3759			.procname	= "gc_thresh1",
3760			.maxlen		= sizeof(int),
3761			.mode		= 0644,
3762			.extra1		= SYSCTL_ZERO,
3763			.extra2		= SYSCTL_INT_MAX,
3764			.proc_handler	= proc_dointvec_minmax,
3765		},
3766		[NEIGH_VAR_GC_THRESH2] = {
3767			.procname	= "gc_thresh2",
3768			.maxlen		= sizeof(int),
3769			.mode		= 0644,
3770			.extra1		= SYSCTL_ZERO,
3771			.extra2		= SYSCTL_INT_MAX,
3772			.proc_handler	= proc_dointvec_minmax,
3773		},
3774		[NEIGH_VAR_GC_THRESH3] = {
3775			.procname	= "gc_thresh3",
3776			.maxlen		= sizeof(int),
3777			.mode		= 0644,
3778			.extra1		= SYSCTL_ZERO,
3779			.extra2		= SYSCTL_INT_MAX,
3780			.proc_handler	= proc_dointvec_minmax,
3781		},
3782		{},
3783	},
3784};
3785
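/*
 * Register the per-device (or per-family default) neighbour sysctls under
 * net/<ipv4|ipv6>/neigh/<ifname|default>/.  Per-device tables are truncated
 * before the gc_* entries, which only make sense for the default table.
 */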
3786int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3787			  proc_handler *handler)
3788{
3789	int i;
3790	struct neigh_sysctl_table *t;
3791	const char *dev_name_source;
3792	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3793	char *p_name;
3794	size_t neigh_vars_size;
3795
3796	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3797	if (!t)
3798		goto err;
3799
3800	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3801		t->neigh_vars[i].data += (long) p;
3802		t->neigh_vars[i].extra1 = dev;
3803		t->neigh_vars[i].extra2 = p;
3804	}
3805
3806	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
3807	if (dev) {
3808		dev_name_source = dev->name;
3809		/* Terminate the table early */
3810		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3811		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3812		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
3813	} else {
3814		struct neigh_table *tbl = p->tbl;
3815		dev_name_source = "default";
3816		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3817		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3818		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3819		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3820	}
3821
3822	if (handler) {
3823		/* RetransTime */
3824		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3825		/* ReachableTime */
3826		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3827		/* RetransTime (in milliseconds)*/
3828		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3829		/* ReachableTime (in milliseconds) */
3830		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3831	} else {
3832		/* These handlers update p->reachable_time after
3833		 * base_reachable_time(_ms) is set, so the new value takes effect
3834		 * after the next neighbour update instead of waiting for
3835		 * neigh_periodic_work to recompute it (which can take multiple
3836		 * minutes).  Any handler that replaces them should do the same.
3837		 */
3838		/* ReachableTime */
3839		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3840			neigh_proc_base_reachable_time;
3841		/* ReachableTime (in milliseconds) */
3842		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3843			neigh_proc_base_reachable_time;
3844	}
3845
3846	switch (neigh_parms_family(p)) {
3847	case AF_INET:
3848	      p_name = "ipv4";
3849	      break;
3850	case AF_INET6:
3851	      p_name = "ipv6";
3852	      break;
3853	default:
3854	      BUG();
3855	}
3856
3857	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3858		p_name, dev_name_source);
3859	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
3860						  neigh_path, t->neigh_vars,
3861						  neigh_vars_size);
3862	if (!t->sysctl_header)
3863		goto free;
3864
3865	p->sysctl_table = t;
3866	return 0;
3867
3868free:
3869	kfree(t);
3870err:
3871	return -ENOBUFS;
3872}
3873EXPORT_SYMBOL(neigh_sysctl_register);
3874
3875void neigh_sysctl_unregister(struct neigh_parms *p)
3876{
3877	if (p->sysctl_table) {
3878		struct neigh_sysctl_table *t = p->sysctl_table;
3879		p->sysctl_table = NULL;
3880		unregister_net_sysctl_table(t->sysctl_header);
3881		kfree(t);
3882	}
3883}
3884EXPORT_SYMBOL(neigh_sysctl_unregister);
3885
3886#endif	/* CONFIG_SYSCTL */
3887
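/*
 * Wire the handlers above into rtnetlink: RTM_NEWNEIGH and RTM_DELNEIGH doit
 * handlers, RTM_GETNEIGH with both doit and dumpit, and the neighbour-table
 * get/set message types.
 */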
3888static int __init neigh_init(void)
3889{
3890	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3891	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3892	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3893
3894	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3895		      0);
3896	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3897
3898	return 0;
3899}
3900
3901subsys_initcall(neigh_init);