v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Generic address resolution entity
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	Fixes:
  10 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  11 *	Harald Welte		Add neighbour cache statistics like rtstat
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/slab.h>
  17#include <linux/kmemleak.h>
  18#include <linux/types.h>
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/socket.h>
  22#include <linux/netdevice.h>
  23#include <linux/proc_fs.h>
  24#ifdef CONFIG_SYSCTL
  25#include <linux/sysctl.h>
  26#endif
  27#include <linux/times.h>
  28#include <net/net_namespace.h>
  29#include <net/neighbour.h>
  30#include <net/arp.h>
  31#include <net/dst.h>
  32#include <net/sock.h>
  33#include <net/netevent.h>
  34#include <net/netlink.h>
  35#include <linux/rtnetlink.h>
  36#include <linux/random.h>
  37#include <linux/string.h>
  38#include <linux/log2.h>
  39#include <linux/inetdevice.h>
  40#include <net/addrconf.h>
  41
  42#include <trace/events/neigh.h>
  43
  44#define DEBUG
  45#define NEIGH_DEBUG 1
  46#define neigh_dbg(level, fmt, ...)		\
  47do {						\
  48	if (level <= NEIGH_DEBUG)		\
  49		pr_debug(fmt, ##__VA_ARGS__);	\
  50} while (0)
  51
  52#define PNEIGH_HASHMASK		0xF
  53
  54static void neigh_timer_handler(struct timer_list *t);
  55static void __neigh_notify(struct neighbour *n, int type, int flags,
  56			   u32 pid);
  57static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
  58static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
  59				    struct net_device *dev);
  60
  61#ifdef CONFIG_PROC_FS
  62static const struct seq_operations neigh_stat_seq_ops;
  63#endif
  64
  65/*
  66   Neighbour hash table buckets are protected with rwlock tbl->lock.
  67
  68   - All the scans/updates to hash buckets MUST be made under this lock.
   69   - NOTHING clever should be done under this lock: no callbacks
   70     to protocol backends, no attempts to send anything to the network.
   71     It will result in deadlocks if the backend/driver wants to use the
   72     neighbour cache.
  73   - If the entry requires some non-trivial actions, increase
  74     its reference count and release table lock.
  75
  76   Neighbour entries are protected:
  77   - with reference count.
  78   - with rwlock neigh->lock
  79
  80   Reference count prevents destruction.
  81
  82   neigh->lock mainly serializes ll address data and its validity state.
   83   However, the same lock is used to protect other entry fields:
  84    - timer
  85    - resolution queue
  86
   87   Again, nothing clever shall be done under neigh->lock;
   88   the most complicated procedure we allow is dev->hard_header.
   89   It is assumed that dev->hard_header is simple and does
   90   not make callbacks to neighbour tables.
  91 */
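/* Editorial example (not part of the original file): a minimal sketch of the
 * reference-count discipline described above. Look the entry up, which takes
 * a reference, and do any non-trivial work (driver callbacks, transmits,
 * sleeping) only with tbl->lock dropped; the reference alone keeps the entry
 * from being destroyed. neigh_lookup() and neigh_release() are the real
 * helpers from this file; the function name itself is illustrative only.
 */
static void __maybe_unused neigh_example_use_entry(struct neigh_table *tbl,
						   const void *pkey,
						   struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev); /* takes a ref */

	if (!n)
		return;

	/* ... non-trivial work here, with no table or entry lock held ... */

	neigh_release(n);	/* drop the reference when done */
}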
  92
  93static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
  94{
  95	kfree_skb(skb);
  96	return -ENETDOWN;
  97}
  98
  99static void neigh_cleanup_and_release(struct neighbour *neigh)
 100{
 101	if (neigh->parms->neigh_cleanup)
 102		neigh->parms->neigh_cleanup(neigh);
 103
 104	trace_neigh_cleanup_and_release(neigh, 0);
 105	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
 106	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 107	neigh_release(neigh);
 108}
 109
 110/*
  111 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
  112 * It corresponds to the default IPv6 settings and is not overridable,
  113 * because it is a really reasonable choice.
 114 */
 115
 116unsigned long neigh_rand_reach_time(unsigned long base)
 117{
 118	return base ? (prandom_u32() % base) + (base >> 1) : 0;
 119}
 120EXPORT_SYMBOL(neigh_rand_reach_time);
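/* Editorial note: with the common default base_reachable_time of 30 seconds
 * (the IPv6 ReachableTime base, also used by ARP), the expression above is
 * (prandom_u32() % 30s) + 15s, i.e. a value uniformly distributed in
 * [15 s, 45 s) -- exactly the (1/2)*base ... (3/2)*base interval described
 * in the comment.
 */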
 121
 122static void neigh_mark_dead(struct neighbour *n)
 123{
 124	n->dead = 1;
 125	if (!list_empty(&n->gc_list)) {
 126		list_del_init(&n->gc_list);
 127		atomic_dec(&n->tbl->gc_entries);
 128	}
 129}
 130
 131static void neigh_update_gc_list(struct neighbour *n)
 132{
 133	bool on_gc_list, exempt_from_gc;
 134
 135	write_lock_bh(&n->tbl->lock);
 136	write_lock(&n->lock);
 137
 138	/* remove from the gc list if new state is permanent or if neighbor
 139	 * is externally learned; otherwise entry should be on the gc list
 140	 */
 141	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
 142			 n->flags & NTF_EXT_LEARNED;
 143	on_gc_list = !list_empty(&n->gc_list);
 144
 145	if (exempt_from_gc && on_gc_list) {
 146		list_del_init(&n->gc_list);
 147		atomic_dec(&n->tbl->gc_entries);
 148	} else if (!exempt_from_gc && !on_gc_list) {
 149		/* add entries to the tail; cleaning removes from the front */
 150		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 151		atomic_inc(&n->tbl->gc_entries);
 152	}
 153
 154	write_unlock(&n->lock);
 155	write_unlock_bh(&n->tbl->lock);
 156}
 157
 158static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
 159				     int *notify)
 160{
 161	bool rc = false;
 162	u8 ndm_flags;
 163
 164	if (!(flags & NEIGH_UPDATE_F_ADMIN))
 165		return rc;
 166
 167	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
 168	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
 169		if (ndm_flags & NTF_EXT_LEARNED)
 170			neigh->flags |= NTF_EXT_LEARNED;
 171		else
 172			neigh->flags &= ~NTF_EXT_LEARNED;
 173		rc = true;
 174		*notify = 1;
 175	}
 176
 177	return rc;
 178}
 179
 180static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
 181		      struct neigh_table *tbl)
 182{
 183	bool retval = false;
 184
 185	write_lock(&n->lock);
 186	if (refcount_read(&n->refcnt) == 1) {
 187		struct neighbour *neigh;
 188
 189		neigh = rcu_dereference_protected(n->next,
 190						  lockdep_is_held(&tbl->lock));
 191		rcu_assign_pointer(*np, neigh);
 192		neigh_mark_dead(n);
 193		retval = true;
 194	}
 195	write_unlock(&n->lock);
 196	if (retval)
 197		neigh_cleanup_and_release(n);
 198	return retval;
 199}
 200
 201bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 202{
 203	struct neigh_hash_table *nht;
 204	void *pkey = ndel->primary_key;
 205	u32 hash_val;
 206	struct neighbour *n;
 207	struct neighbour __rcu **np;
 208
 209	nht = rcu_dereference_protected(tbl->nht,
 210					lockdep_is_held(&tbl->lock));
 211	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
 212	hash_val = hash_val >> (32 - nht->hash_shift);
 213
 214	np = &nht->hash_buckets[hash_val];
 215	while ((n = rcu_dereference_protected(*np,
 216					      lockdep_is_held(&tbl->lock)))) {
 217		if (n == ndel)
 218			return neigh_del(n, np, tbl);
 219		np = &n->next;
 220	}
 221	return false;
 222}
 223
 224static int neigh_forced_gc(struct neigh_table *tbl)
 225{
 226	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
 227	unsigned long tref = jiffies - 5 * HZ;
 228	struct neighbour *n, *tmp;
 229	int shrunk = 0;
 230
 231	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 232
 233	write_lock_bh(&tbl->lock);
 234
 235	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
 236		if (refcount_read(&n->refcnt) == 1) {
 237			bool remove = false;
 238
 239			write_lock(&n->lock);
 240			if ((n->nud_state == NUD_FAILED) ||
 241			    time_after(tref, n->updated))
 242				remove = true;
 243			write_unlock(&n->lock);
 244
 245			if (remove && neigh_remove_one(n, tbl))
 246				shrunk++;
 247			if (shrunk >= max_clean)
 248				break;
 249		}
 250	}
 251
 252	tbl->last_flush = jiffies;
 253
 254	write_unlock_bh(&tbl->lock);
 255
 256	return shrunk;
 257}
 258
 259static void neigh_add_timer(struct neighbour *n, unsigned long when)
 260{
 261	neigh_hold(n);
 262	if (unlikely(mod_timer(&n->timer, when))) {
 263		printk("NEIGH: BUG, double timer add, state is %x\n",
 264		       n->nud_state);
 265		dump_stack();
 266	}
 267}
 268
 269static int neigh_del_timer(struct neighbour *n)
 270{
 271	if ((n->nud_state & NUD_IN_TIMER) &&
 272	    del_timer(&n->timer)) {
 273		neigh_release(n);
 274		return 1;
 275	}
 276	return 0;
 277}
 278
 279static void pneigh_queue_purge(struct sk_buff_head *list)
 280{
 281	struct sk_buff *skb;
 282
 283	while ((skb = skb_dequeue(list)) != NULL) {
 284		dev_put(skb->dev);
 285		kfree_skb(skb);
 286	}
 287}
 288
 289static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
 290			    bool skip_perm)
 291{
 292	int i;
 293	struct neigh_hash_table *nht;
 294
 295	nht = rcu_dereference_protected(tbl->nht,
 296					lockdep_is_held(&tbl->lock));
 297
 298	for (i = 0; i < (1 << nht->hash_shift); i++) {
 299		struct neighbour *n;
 300		struct neighbour __rcu **np = &nht->hash_buckets[i];
 301
 302		while ((n = rcu_dereference_protected(*np,
 303					lockdep_is_held(&tbl->lock))) != NULL) {
 304			if (dev && n->dev != dev) {
 305				np = &n->next;
 306				continue;
 307			}
 308			if (skip_perm && n->nud_state & NUD_PERMANENT) {
 309				np = &n->next;
 310				continue;
 311			}
 312			rcu_assign_pointer(*np,
 313				   rcu_dereference_protected(n->next,
 314						lockdep_is_held(&tbl->lock)));
 315			write_lock(&n->lock);
 316			neigh_del_timer(n);
 317			neigh_mark_dead(n);
 318			if (refcount_read(&n->refcnt) != 1) {
 319				/* The most unpleasant situation.
 320				   We must destroy neighbour entry,
 321				   but someone still uses it.
 322
 323				   The destroy will be delayed until
 324				   the last user releases us, but
 325				   we must kill timers etc. and move
 326				   it to safe state.
 327				 */
 328				__skb_queue_purge(&n->arp_queue);
 329				n->arp_queue_len_bytes = 0;
 330				n->output = neigh_blackhole;
 331				if (n->nud_state & NUD_VALID)
 332					n->nud_state = NUD_NOARP;
 333				else
 334					n->nud_state = NUD_NONE;
 335				neigh_dbg(2, "neigh %p is stray\n", n);
 336			}
 337			write_unlock(&n->lock);
 338			neigh_cleanup_and_release(n);
 339		}
 340	}
 341}
 342
 343void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 344{
 345	write_lock_bh(&tbl->lock);
 346	neigh_flush_dev(tbl, dev, false);
 347	write_unlock_bh(&tbl->lock);
 348}
 349EXPORT_SYMBOL(neigh_changeaddr);
 350
 351static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 352			  bool skip_perm)
 353{
 354	write_lock_bh(&tbl->lock);
 355	neigh_flush_dev(tbl, dev, skip_perm);
 356	pneigh_ifdown_and_unlock(tbl, dev);
 357
 358	del_timer_sync(&tbl->proxy_timer);
 359	pneigh_queue_purge(&tbl->proxy_queue);
 360	return 0;
 361}
 362
 363int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
 364{
 365	__neigh_ifdown(tbl, dev, true);
 366	return 0;
 367}
 368EXPORT_SYMBOL(neigh_carrier_down);
 369
 370int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 371{
 372	__neigh_ifdown(tbl, dev, false);
 373	return 0;
 374}
 375EXPORT_SYMBOL(neigh_ifdown);
 376
 377static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 378				     struct net_device *dev,
 379				     bool exempt_from_gc)
 380{
 381	struct neighbour *n = NULL;
 382	unsigned long now = jiffies;
 383	int entries;
 384
 385	if (exempt_from_gc)
 386		goto do_alloc;
 387
 388	entries = atomic_inc_return(&tbl->gc_entries) - 1;
 389	if (entries >= tbl->gc_thresh3 ||
 390	    (entries >= tbl->gc_thresh2 &&
 391	     time_after(now, tbl->last_flush + 5 * HZ))) {
 392		if (!neigh_forced_gc(tbl) &&
 393		    entries >= tbl->gc_thresh3) {
 394			net_info_ratelimited("%s: neighbor table overflow!\n",
 395					     tbl->id);
 396			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 397			goto out_entries;
 398		}
 399	}
 400
 401do_alloc:
 402	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 403	if (!n)
 404		goto out_entries;
 405
 406	__skb_queue_head_init(&n->arp_queue);
 407	rwlock_init(&n->lock);
 408	seqlock_init(&n->ha_lock);
 409	n->updated	  = n->used = now;
 410	n->nud_state	  = NUD_NONE;
 411	n->output	  = neigh_blackhole;
 412	seqlock_init(&n->hh.hh_lock);
 413	n->parms	  = neigh_parms_clone(&tbl->parms);
 414	timer_setup(&n->timer, neigh_timer_handler, 0);
 415
 416	NEIGH_CACHE_STAT_INC(tbl, allocs);
 417	n->tbl		  = tbl;
 418	refcount_set(&n->refcnt, 1);
 419	n->dead		  = 1;
 420	INIT_LIST_HEAD(&n->gc_list);
 421
 422	atomic_inc(&tbl->entries);
 423out:
 424	return n;
 425
 426out_entries:
 427	if (!exempt_from_gc)
 428		atomic_dec(&tbl->gc_entries);
 429	goto out;
 430}
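/* Editorial note: for the IPv4 ARP table the thresholds consulted above
 * default to gc_thresh1 = 128, gc_thresh2 = 512 and gc_thresh3 = 1024
 * (tunable via /proc/sys/net/ipv4/neigh/default/gc_thresh*). So allocating a
 * non-exempt entry runs neigh_forced_gc() once the GC-eligible count reaches
 * gc_thresh3, or reaches gc_thresh2 with the last flush more than 5 seconds
 * ago, and only fails with "neighbor table overflow!" when forced GC frees
 * nothing and the count is still at or above gc_thresh3.
 */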
 431
 432static void neigh_get_hash_rnd(u32 *x)
 433{
 434	*x = get_random_u32() | 1;
 435}
 436
 437static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 438{
 439	size_t size = (1 << shift) * sizeof(struct neighbour *);
 440	struct neigh_hash_table *ret;
 441	struct neighbour __rcu **buckets;
 442	int i;
 443
 444	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 445	if (!ret)
 446		return NULL;
 447	if (size <= PAGE_SIZE) {
 448		buckets = kzalloc(size, GFP_ATOMIC);
 449	} else {
 450		buckets = (struct neighbour __rcu **)
 451			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 452					   get_order(size));
 453		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
 454	}
 455	if (!buckets) {
 456		kfree(ret);
 457		return NULL;
 458	}
 459	ret->hash_buckets = buckets;
 460	ret->hash_shift = shift;
 461	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 462		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 463	return ret;
 464}
 465
 466static void neigh_hash_free_rcu(struct rcu_head *head)
 467{
 468	struct neigh_hash_table *nht = container_of(head,
 469						    struct neigh_hash_table,
 470						    rcu);
 471	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
 472	struct neighbour __rcu **buckets = nht->hash_buckets;
 473
 474	if (size <= PAGE_SIZE) {
 475		kfree(buckets);
 476	} else {
 477		kmemleak_free(buckets);
 478		free_pages((unsigned long)buckets, get_order(size));
 479	}
 480	kfree(nht);
 481}
 482
 483static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 484						unsigned long new_shift)
 485{
 486	unsigned int i, hash;
 487	struct neigh_hash_table *new_nht, *old_nht;
 488
 489	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 490
 491	old_nht = rcu_dereference_protected(tbl->nht,
 492					    lockdep_is_held(&tbl->lock));
 493	new_nht = neigh_hash_alloc(new_shift);
 494	if (!new_nht)
 495		return old_nht;
 496
 497	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 498		struct neighbour *n, *next;
 499
 500		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
 501						   lockdep_is_held(&tbl->lock));
 502		     n != NULL;
 503		     n = next) {
 504			hash = tbl->hash(n->primary_key, n->dev,
 505					 new_nht->hash_rnd);
 506
 507			hash >>= (32 - new_nht->hash_shift);
 508			next = rcu_dereference_protected(n->next,
 509						lockdep_is_held(&tbl->lock));
 510
 511			rcu_assign_pointer(n->next,
 512					   rcu_dereference_protected(
 513						new_nht->hash_buckets[hash],
 514						lockdep_is_held(&tbl->lock)));
 515			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 516		}
 517	}
 518
 519	rcu_assign_pointer(tbl->nht, new_nht);
 520	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 521	return new_nht;
 522}
 523
 524struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 525			       struct net_device *dev)
 526{
 527	struct neighbour *n;
 528
 529	NEIGH_CACHE_STAT_INC(tbl, lookups);
 530
 531	rcu_read_lock_bh();
 532	n = __neigh_lookup_noref(tbl, pkey, dev);
 533	if (n) {
 534		if (!refcount_inc_not_zero(&n->refcnt))
 535			n = NULL;
 536		NEIGH_CACHE_STAT_INC(tbl, hits);
 537	}
 538
 539	rcu_read_unlock_bh();
 540	return n;
 541}
 542EXPORT_SYMBOL(neigh_lookup);
 543
 544struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 545				     const void *pkey)
 546{
 547	struct neighbour *n;
 548	unsigned int key_len = tbl->key_len;
 549	u32 hash_val;
 550	struct neigh_hash_table *nht;
 551
 552	NEIGH_CACHE_STAT_INC(tbl, lookups);
 553
 554	rcu_read_lock_bh();
 555	nht = rcu_dereference_bh(tbl->nht);
 556	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
 557
 558	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 559	     n != NULL;
 560	     n = rcu_dereference_bh(n->next)) {
 561		if (!memcmp(n->primary_key, pkey, key_len) &&
 562		    net_eq(dev_net(n->dev), net)) {
 563			if (!refcount_inc_not_zero(&n->refcnt))
 564				n = NULL;
 565			NEIGH_CACHE_STAT_INC(tbl, hits);
 566			break;
 567		}
 568	}
 569
 570	rcu_read_unlock_bh();
 571	return n;
 572}
 573EXPORT_SYMBOL(neigh_lookup_nodev);
 574
 575static struct neighbour *___neigh_create(struct neigh_table *tbl,
 576					 const void *pkey,
 577					 struct net_device *dev,
 578					 bool exempt_from_gc, bool want_ref)
 579{
 580	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
 581	u32 hash_val;
 582	unsigned int key_len = tbl->key_len;
 583	int error;
 584	struct neigh_hash_table *nht;
 585
 586	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
 587
 588	if (!n) {
 589		rc = ERR_PTR(-ENOBUFS);
 590		goto out;
 591	}
 592
 593	memcpy(n->primary_key, pkey, key_len);
 594	n->dev = dev;
 595	dev_hold(dev);
 596
 597	/* Protocol specific setup. */
 598	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 599		rc = ERR_PTR(error);
 600		goto out_neigh_release;
 601	}
 602
 603	if (dev->netdev_ops->ndo_neigh_construct) {
 604		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 605		if (error < 0) {
 606			rc = ERR_PTR(error);
 607			goto out_neigh_release;
 608		}
 609	}
 610
 611	/* Device specific setup. */
 612	if (n->parms->neigh_setup &&
 613	    (error = n->parms->neigh_setup(n)) < 0) {
 614		rc = ERR_PTR(error);
 615		goto out_neigh_release;
 616	}
 617
 618	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 619
 620	write_lock_bh(&tbl->lock);
 621	nht = rcu_dereference_protected(tbl->nht,
 622					lockdep_is_held(&tbl->lock));
 623
 624	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 625		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 626
 627	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 628
 629	if (n->parms->dead) {
 630		rc = ERR_PTR(-EINVAL);
 631		goto out_tbl_unlock;
 632	}
 633
 634	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
 635					    lockdep_is_held(&tbl->lock));
 636	     n1 != NULL;
 637	     n1 = rcu_dereference_protected(n1->next,
 638			lockdep_is_held(&tbl->lock))) {
 639		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 640			if (want_ref)
 641				neigh_hold(n1);
 642			rc = n1;
 643			goto out_tbl_unlock;
 644		}
 645	}
 646
 647	n->dead = 0;
 648	if (!exempt_from_gc)
 649		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 650
 651	if (want_ref)
 652		neigh_hold(n);
 653	rcu_assign_pointer(n->next,
 654			   rcu_dereference_protected(nht->hash_buckets[hash_val],
 655						     lockdep_is_held(&tbl->lock)));
 656	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 657	write_unlock_bh(&tbl->lock);
 658	neigh_dbg(2, "neigh %p is created\n", n);
 659	rc = n;
 660out:
 661	return rc;
 662out_tbl_unlock:
 663	write_unlock_bh(&tbl->lock);
 664out_neigh_release:
 665	if (!exempt_from_gc)
 666		atomic_dec(&tbl->gc_entries);
 667	neigh_release(n);
 668	goto out;
 669}
 670
 671struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 672				 struct net_device *dev, bool want_ref)
 673{
 674	return ___neigh_create(tbl, pkey, dev, false, want_ref);
 675}
 676EXPORT_SYMBOL(__neigh_create);
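/* Editorial example (paraphrased from the IPv4 output path): callers normally
 * try a lockless lookup first and fall back to __neigh_create() only when the
 * next hop has no entry yet, all under rcu_read_lock_bh():
 *
 *	neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
 *	if (unlikely(!neigh))
 *		neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
 *
 * The resulting entry (checked with IS_ERR()) is then handed to
 * neigh_output() to resolve and transmit the skb.
 */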
 677
 678static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 679{
 680	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 681	hash_val ^= (hash_val >> 16);
 682	hash_val ^= hash_val >> 8;
 683	hash_val ^= hash_val >> 4;
 684	hash_val &= PNEIGH_HASHMASK;
 685	return hash_val;
 686}
 687
 688static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 689					      struct net *net,
 690					      const void *pkey,
 691					      unsigned int key_len,
 692					      struct net_device *dev)
 693{
 694	while (n) {
 695		if (!memcmp(n->key, pkey, key_len) &&
 696		    net_eq(pneigh_net(n), net) &&
 697		    (n->dev == dev || !n->dev))
 698			return n;
 699		n = n->next;
 700	}
 701	return NULL;
 702}
 703
 704struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 705		struct net *net, const void *pkey, struct net_device *dev)
 706{
 707	unsigned int key_len = tbl->key_len;
 708	u32 hash_val = pneigh_hash(pkey, key_len);
 709
 710	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 711				 net, pkey, key_len, dev);
 712}
 713EXPORT_SYMBOL_GPL(__pneigh_lookup);
 714
 715struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 716				    struct net *net, const void *pkey,
 717				    struct net_device *dev, int creat)
 718{
 719	struct pneigh_entry *n;
 720	unsigned int key_len = tbl->key_len;
 721	u32 hash_val = pneigh_hash(pkey, key_len);
 722
 723	read_lock_bh(&tbl->lock);
 724	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 725			      net, pkey, key_len, dev);
 726	read_unlock_bh(&tbl->lock);
 727
 728	if (n || !creat)
 729		goto out;
 730
 731	ASSERT_RTNL();
 732
 733	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
 734	if (!n)
 735		goto out;
 736
 737	n->protocol = 0;
 738	write_pnet(&n->net, net);
 739	memcpy(n->key, pkey, key_len);
 740	n->dev = dev;
 741	if (dev)
 742		dev_hold(dev);
 743
 744	if (tbl->pconstructor && tbl->pconstructor(n)) {
 745		if (dev)
 746			dev_put(dev);
 747		kfree(n);
 748		n = NULL;
 749		goto out;
 750	}
 751
 752	write_lock_bh(&tbl->lock);
 753	n->next = tbl->phash_buckets[hash_val];
 754	tbl->phash_buckets[hash_val] = n;
 755	write_unlock_bh(&tbl->lock);
 756out:
 757	return n;
 758}
 759EXPORT_SYMBOL(pneigh_lookup);
 760
 761
 762int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 763		  struct net_device *dev)
 764{
 765	struct pneigh_entry *n, **np;
 766	unsigned int key_len = tbl->key_len;
 767	u32 hash_val = pneigh_hash(pkey, key_len);
 768
 769	write_lock_bh(&tbl->lock);
 770	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 771	     np = &n->next) {
 772		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 773		    net_eq(pneigh_net(n), net)) {
 774			*np = n->next;
 775			write_unlock_bh(&tbl->lock);
 776			if (tbl->pdestructor)
 777				tbl->pdestructor(n);
 778			if (n->dev)
 779				dev_put(n->dev);
 780			kfree(n);
 781			return 0;
 782		}
 783	}
 784	write_unlock_bh(&tbl->lock);
 785	return -ENOENT;
 786}
 787
 788static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 789				    struct net_device *dev)
 790{
 791	struct pneigh_entry *n, **np, *freelist = NULL;
 792	u32 h;
 793
 794	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 795		np = &tbl->phash_buckets[h];
 796		while ((n = *np) != NULL) {
 797			if (!dev || n->dev == dev) {
 798				*np = n->next;
 799				n->next = freelist;
 800				freelist = n;
 801				continue;
 802			}
 803			np = &n->next;
 804		}
 805	}
 806	write_unlock_bh(&tbl->lock);
 807	while ((n = freelist)) {
 808		freelist = n->next;
 809		n->next = NULL;
 810		if (tbl->pdestructor)
 811			tbl->pdestructor(n);
 812		if (n->dev)
 813			dev_put(n->dev);
 814		kfree(n);
 815	}
 816	return -ENOENT;
 817}
 818
 819static void neigh_parms_destroy(struct neigh_parms *parms);
 820
 821static inline void neigh_parms_put(struct neigh_parms *parms)
 822{
 823	if (refcount_dec_and_test(&parms->refcnt))
 824		neigh_parms_destroy(parms);
 825}
 826
 827/*
 828 *	neighbour must already be out of the table;
 829 *
 830 */
 831void neigh_destroy(struct neighbour *neigh)
 832{
 833	struct net_device *dev = neigh->dev;
 834
 835	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 836
 837	if (!neigh->dead) {
 838		pr_warn("Destroying alive neighbour %p\n", neigh);
 839		dump_stack();
 840		return;
 841	}
 842
 843	if (neigh_del_timer(neigh))
 844		pr_warn("Impossible event\n");
 845
 846	write_lock_bh(&neigh->lock);
 847	__skb_queue_purge(&neigh->arp_queue);
 848	write_unlock_bh(&neigh->lock);
 849	neigh->arp_queue_len_bytes = 0;
 850
 851	if (dev->netdev_ops->ndo_neigh_destroy)
 852		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 853
 854	dev_put(dev);
 855	neigh_parms_put(neigh->parms);
 856
 857	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 858
 859	atomic_dec(&neigh->tbl->entries);
 860	kfree_rcu(neigh, rcu);
 861}
 862EXPORT_SYMBOL(neigh_destroy);
 863
 864/* Neighbour state is suspicious;
 865   disable fast path.
 866
 867   Called with write_locked neigh.
 868 */
 869static void neigh_suspect(struct neighbour *neigh)
 870{
 871	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 872
 873	neigh->output = neigh->ops->output;
 874}
 875
 876/* Neighbour state is OK;
 877   enable fast path.
 878
 879   Called with write_locked neigh.
 880 */
 881static void neigh_connect(struct neighbour *neigh)
 882{
 883	neigh_dbg(2, "neigh %p is connected\n", neigh);
 884
 885	neigh->output = neigh->ops->connected_output;
 886}
 887
 888static void neigh_periodic_work(struct work_struct *work)
 889{
 890	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 891	struct neighbour *n;
 892	struct neighbour __rcu **np;
 893	unsigned int i;
 894	struct neigh_hash_table *nht;
 895
 896	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 897
 898	write_lock_bh(&tbl->lock);
 899	nht = rcu_dereference_protected(tbl->nht,
 900					lockdep_is_held(&tbl->lock));
 901
 902	/*
 903	 *	periodically recompute ReachableTime from random function
 904	 */
 905
 906	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 907		struct neigh_parms *p;
 908		tbl->last_rand = jiffies;
 909		list_for_each_entry(p, &tbl->parms_list, list)
 910			p->reachable_time =
 911				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 912	}
 913
 914	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
 915		goto out;
 916
 917	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 918		np = &nht->hash_buckets[i];
 919
 920		while ((n = rcu_dereference_protected(*np,
 921				lockdep_is_held(&tbl->lock))) != NULL) {
 922			unsigned int state;
 923
 924			write_lock(&n->lock);
 925
 926			state = n->nud_state;
 927			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
 928			    (n->flags & NTF_EXT_LEARNED)) {
 929				write_unlock(&n->lock);
 930				goto next_elt;
 931			}
 932
 933			if (time_before(n->used, n->confirmed))
 934				n->used = n->confirmed;
 935
 936			if (refcount_read(&n->refcnt) == 1 &&
 937			    (state == NUD_FAILED ||
 938			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 939				*np = n->next;
 940				neigh_mark_dead(n);
 941				write_unlock(&n->lock);
 942				neigh_cleanup_and_release(n);
 943				continue;
 944			}
 945			write_unlock(&n->lock);
 946
 947next_elt:
 948			np = &n->next;
 949		}
 950		/*
 951		 * It's fine to release lock here, even if hash table
 952		 * grows while we are preempted.
 953		 */
 954		write_unlock_bh(&tbl->lock);
 955		cond_resched();
 956		write_lock_bh(&tbl->lock);
 957		nht = rcu_dereference_protected(tbl->nht,
 958						lockdep_is_held(&tbl->lock));
 959	}
 960out:
 961	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 962	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 963	 * BASE_REACHABLE_TIME.
 964	 */
 965	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 966			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 967	write_unlock_bh(&tbl->lock);
 968}
 969
 970static __inline__ int neigh_max_probes(struct neighbour *n)
 971{
 972	struct neigh_parms *p = n->parms;
 973	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 974	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 975	        NEIGH_VAR(p, MCAST_PROBES));
 976}
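/* Editorial note: with the usual ARP defaults (ucast_probes = 3,
 * mcast_probes = 3, app_probes = 0, mcast_reprobes = 0) this allows
 * 3 + 0 + 3 = 6 probes while resolving a new entry (NUD_INCOMPLETE) but only
 * 3 + 0 + 0 = 3 unicast probes when re-verifying a stale one (NUD_PROBE),
 * after which the timer handler below moves the entry to NUD_FAILED.
 */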
 977
 978static void neigh_invalidate(struct neighbour *neigh)
 979	__releases(neigh->lock)
 980	__acquires(neigh->lock)
 981{
 982	struct sk_buff *skb;
 983
 984	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 985	neigh_dbg(2, "neigh %p is failed\n", neigh);
 986	neigh->updated = jiffies;
 987
  988	/* This is a very delicate place. report_unreachable is a very complicated
  989	   routine. In particular, it can hit the same neighbour entry!
  990
  991	   So we try to be careful and avoid a dead loop. --ANK
 992	 */
 993	while (neigh->nud_state == NUD_FAILED &&
 994	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
 995		write_unlock(&neigh->lock);
 996		neigh->ops->error_report(neigh, skb);
 997		write_lock(&neigh->lock);
 998	}
 999	__skb_queue_purge(&neigh->arp_queue);
1000	neigh->arp_queue_len_bytes = 0;
1001}
1002
1003static void neigh_probe(struct neighbour *neigh)
1004	__releases(neigh->lock)
1005{
1006	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1007	/* keep skb alive even if arp_queue overflows */
1008	if (skb)
1009		skb = skb_clone(skb, GFP_ATOMIC);
1010	write_unlock(&neigh->lock);
1011	if (neigh->ops->solicit)
1012		neigh->ops->solicit(neigh, skb);
1013	atomic_inc(&neigh->probes);
1014	consume_skb(skb);
1015}
1016
1017/* Called when a timer expires for a neighbour entry. */
1018
1019static void neigh_timer_handler(struct timer_list *t)
1020{
1021	unsigned long now, next;
1022	struct neighbour *neigh = from_timer(neigh, t, timer);
1023	unsigned int state;
1024	int notify = 0;
1025
1026	write_lock(&neigh->lock);
1027
1028	state = neigh->nud_state;
1029	now = jiffies;
1030	next = now + HZ;
1031
1032	if (!(state & NUD_IN_TIMER))
1033		goto out;
1034
1035	if (state & NUD_REACHABLE) {
1036		if (time_before_eq(now,
1037				   neigh->confirmed + neigh->parms->reachable_time)) {
1038			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1039			next = neigh->confirmed + neigh->parms->reachable_time;
1040		} else if (time_before_eq(now,
1041					  neigh->used +
1042					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1043			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1044			neigh->nud_state = NUD_DELAY;
1045			neigh->updated = jiffies;
1046			neigh_suspect(neigh);
1047			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1048		} else {
1049			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1050			neigh->nud_state = NUD_STALE;
1051			neigh->updated = jiffies;
1052			neigh_suspect(neigh);
1053			notify = 1;
1054		}
1055	} else if (state & NUD_DELAY) {
1056		if (time_before_eq(now,
1057				   neigh->confirmed +
1058				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1059			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1060			neigh->nud_state = NUD_REACHABLE;
1061			neigh->updated = jiffies;
1062			neigh_connect(neigh);
1063			notify = 1;
1064			next = neigh->confirmed + neigh->parms->reachable_time;
1065		} else {
1066			neigh_dbg(2, "neigh %p is probed\n", neigh);
1067			neigh->nud_state = NUD_PROBE;
1068			neigh->updated = jiffies;
1069			atomic_set(&neigh->probes, 0);
1070			notify = 1;
1071			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1072		}
1073	} else {
1074		/* NUD_PROBE|NUD_INCOMPLETE */
1075		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1076	}
1077
1078	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1079	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1080		neigh->nud_state = NUD_FAILED;
1081		notify = 1;
1082		neigh_invalidate(neigh);
1083		goto out;
1084	}
1085
1086	if (neigh->nud_state & NUD_IN_TIMER) {
1087		if (time_before(next, jiffies + HZ/2))
1088			next = jiffies + HZ/2;
1089		if (!mod_timer(&neigh->timer, next))
1090			neigh_hold(neigh);
1091	}
1092	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1093		neigh_probe(neigh);
1094	} else {
1095out:
1096		write_unlock(&neigh->lock);
1097	}
1098
1099	if (notify)
1100		neigh_update_notify(neigh, 0);
1101
1102	trace_neigh_timer_handler(neigh, 0);
1103
1104	neigh_release(neigh);
1105}
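/* Editorial summary (derived from the handler above) of the transitions it
 * drives:
 *
 *	NUD_REACHABLE -> NUD_DELAY	confirmation expired, entry recently used
 *	NUD_REACHABLE -> NUD_STALE	confirmation expired, entry idle
 *	NUD_DELAY     -> NUD_REACHABLE	confirmed within DELAY_PROBE_TIME
 *	NUD_DELAY     -> NUD_PROBE	not confirmed; start unicast probes
 *	NUD_PROBE / NUD_INCOMPLETE -> NUD_FAILED  once neigh_max_probes()
 *						  probes go unanswered
 *
 * The DELAY -> REACHABLE shortcut is what lets upper-layer confirmations
 * (anything that updates neigh->confirmed, e.g. dst_confirm_neigh()) avoid
 * sending probes at all.
 */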
1106
1107int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1108{
1109	int rc;
1110	bool immediate_probe = false;
1111
1112	write_lock_bh(&neigh->lock);
1113
1114	rc = 0;
1115	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1116		goto out_unlock_bh;
1117	if (neigh->dead)
1118		goto out_dead;
1119
1120	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1121		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1122		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1123			unsigned long next, now = jiffies;
1124
1125			atomic_set(&neigh->probes,
1126				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1127			neigh_del_timer(neigh);
1128			neigh->nud_state     = NUD_INCOMPLETE;
1129			neigh->updated = now;
1130			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1131					 HZ/2);
1132			neigh_add_timer(neigh, next);
1133			immediate_probe = true;
1134		} else {
1135			neigh->nud_state = NUD_FAILED;
1136			neigh->updated = jiffies;
1137			write_unlock_bh(&neigh->lock);
1138
1139			kfree_skb(skb);
1140			return 1;
1141		}
1142	} else if (neigh->nud_state & NUD_STALE) {
1143		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1144		neigh_del_timer(neigh);
1145		neigh->nud_state = NUD_DELAY;
1146		neigh->updated = jiffies;
1147		neigh_add_timer(neigh, jiffies +
1148				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1149	}
1150
1151	if (neigh->nud_state == NUD_INCOMPLETE) {
1152		if (skb) {
1153			while (neigh->arp_queue_len_bytes + skb->truesize >
1154			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1155				struct sk_buff *buff;
1156
1157				buff = __skb_dequeue(&neigh->arp_queue);
1158				if (!buff)
1159					break;
1160				neigh->arp_queue_len_bytes -= buff->truesize;
1161				kfree_skb(buff);
1162				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1163			}
1164			skb_dst_force(skb);
1165			__skb_queue_tail(&neigh->arp_queue, skb);
1166			neigh->arp_queue_len_bytes += skb->truesize;
1167		}
1168		rc = 1;
1169	}
1170out_unlock_bh:
1171	if (immediate_probe)
1172		neigh_probe(neigh);
1173	else
1174		write_unlock(&neigh->lock);
1175	local_bh_enable();
1176	trace_neigh_event_send_done(neigh, rc);
1177	return rc;
1178
1179out_dead:
1180	if (neigh->nud_state & NUD_STALE)
1181		goto out_unlock_bh;
1182	write_unlock_bh(&neigh->lock);
1183	kfree_skb(skb);
1184	trace_neigh_event_send_dead(neigh, 1);
1185	return 1;
1186}
1187EXPORT_SYMBOL(__neigh_event_send);
1188
1189static void neigh_update_hhs(struct neighbour *neigh)
1190{
1191	struct hh_cache *hh;
1192	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1193		= NULL;
1194
1195	if (neigh->dev->header_ops)
1196		update = neigh->dev->header_ops->cache_update;
1197
1198	if (update) {
1199		hh = &neigh->hh;
1200		if (hh->hh_len) {
1201			write_seqlock_bh(&hh->hh_lock);
1202			update(hh, neigh->dev, neigh->ha);
1203			write_sequnlock_bh(&hh->hh_lock);
1204		}
1205	}
1206}
1207
1208
1209
1210/* Generic update routine.
1211   -- lladdr is new lladdr or NULL, if it is not supplied.
1212   -- new    is new state.
1213   -- flags
1214	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1215				if it is different.
1216	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1217				lladdr instead of overriding it
1218				if it is different.
1219	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1220
1221	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1222				NTF_ROUTER flag.
1223	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1224				a router.
1225
1226   Caller MUST hold reference count on the entry.
1227 */
1228
1229static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1230			  u8 new, u32 flags, u32 nlmsg_pid,
1231			  struct netlink_ext_ack *extack)
1232{
1233	bool ext_learn_change = false;
1234	u8 old;
1235	int err;
1236	int notify = 0;
1237	struct net_device *dev;
1238	int update_isrouter = 0;
1239
1240	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1241
1242	write_lock_bh(&neigh->lock);
1243
1244	dev    = neigh->dev;
1245	old    = neigh->nud_state;
1246	err    = -EPERM;
1247
1248	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1249	    (old & (NUD_NOARP | NUD_PERMANENT)))
1250		goto out;
1251	if (neigh->dead) {
1252		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1253		goto out;
1254	}
1255
1256	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
1257
1258	if (!(new & NUD_VALID)) {
1259		neigh_del_timer(neigh);
1260		if (old & NUD_CONNECTED)
1261			neigh_suspect(neigh);
1262		neigh->nud_state = new;
1263		err = 0;
1264		notify = old & NUD_VALID;
1265		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1266		    (new & NUD_FAILED)) {
1267			neigh_invalidate(neigh);
1268			notify = 1;
1269		}
1270		goto out;
1271	}
1272
1273	/* Compare new lladdr with cached one */
1274	if (!dev->addr_len) {
1275		/* First case: device needs no address. */
1276		lladdr = neigh->ha;
1277	} else if (lladdr) {
1278		/* The second case: if something is already cached
1279		   and a new address is proposed:
1280		   - compare new & old
1281		   - if they are different, check override flag
1282		 */
1283		if ((old & NUD_VALID) &&
1284		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1285			lladdr = neigh->ha;
1286	} else {
1287		/* No address is supplied; if we know something,
1288		   use it, otherwise discard the request.
1289		 */
1290		err = -EINVAL;
1291		if (!(old & NUD_VALID)) {
1292			NL_SET_ERR_MSG(extack, "No link layer address given");
1293			goto out;
1294		}
1295		lladdr = neigh->ha;
1296	}
1297
1298	/* Update confirmed timestamp for neighbour entry after we
 1299	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1300	 */
1301	if (new & NUD_CONNECTED)
1302		neigh->confirmed = jiffies;
1303
1304	/* If entry was valid and address is not changed,
1305	   do not change entry state, if new one is STALE.
1306	 */
1307	err = 0;
1308	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1309	if (old & NUD_VALID) {
1310		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1311			update_isrouter = 0;
1312			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1313			    (old & NUD_CONNECTED)) {
1314				lladdr = neigh->ha;
1315				new = NUD_STALE;
1316			} else
1317				goto out;
1318		} else {
1319			if (lladdr == neigh->ha && new == NUD_STALE &&
1320			    !(flags & NEIGH_UPDATE_F_ADMIN))
1321				new = old;
1322		}
1323	}
1324
1325	/* Update timestamp only once we know we will make a change to the
 1326	 * neighbour entry. Otherwise we risk moving the locktime window with
 1327	 * no-op updates and ignoring relevant ARP updates.
1328	 */
1329	if (new != old || lladdr != neigh->ha)
1330		neigh->updated = jiffies;
1331
1332	if (new != old) {
1333		neigh_del_timer(neigh);
1334		if (new & NUD_PROBE)
1335			atomic_set(&neigh->probes, 0);
1336		if (new & NUD_IN_TIMER)
1337			neigh_add_timer(neigh, (jiffies +
1338						((new & NUD_REACHABLE) ?
1339						 neigh->parms->reachable_time :
1340						 0)));
1341		neigh->nud_state = new;
1342		notify = 1;
1343	}
1344
1345	if (lladdr != neigh->ha) {
1346		write_seqlock(&neigh->ha_lock);
1347		memcpy(&neigh->ha, lladdr, dev->addr_len);
1348		write_sequnlock(&neigh->ha_lock);
1349		neigh_update_hhs(neigh);
1350		if (!(new & NUD_CONNECTED))
1351			neigh->confirmed = jiffies -
1352				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1353		notify = 1;
1354	}
1355	if (new == old)
1356		goto out;
1357	if (new & NUD_CONNECTED)
1358		neigh_connect(neigh);
1359	else
1360		neigh_suspect(neigh);
1361	if (!(old & NUD_VALID)) {
1362		struct sk_buff *skb;
1363
1364		/* Again: avoid dead loop if something went wrong */
1365
1366		while (neigh->nud_state & NUD_VALID &&
1367		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1368			struct dst_entry *dst = skb_dst(skb);
1369			struct neighbour *n2, *n1 = neigh;
1370			write_unlock_bh(&neigh->lock);
1371
1372			rcu_read_lock();
1373
1374			/* Why not just use 'neigh' as-is?  The problem is that
1375			 * things such as shaper, eql, and sch_teql can end up
1376			 * using alternative, different, neigh objects to output
1377			 * the packet in the output path.  So what we need to do
1378			 * here is re-lookup the top-level neigh in the path so
1379			 * we can reinject the packet there.
1380			 */
1381			n2 = NULL;
1382			if (dst) {
1383				n2 = dst_neigh_lookup_skb(dst, skb);
1384				if (n2)
1385					n1 = n2;
1386			}
1387			n1->output(n1, skb);
1388			if (n2)
1389				neigh_release(n2);
1390			rcu_read_unlock();
1391
1392			write_lock_bh(&neigh->lock);
1393		}
1394		__skb_queue_purge(&neigh->arp_queue);
1395		neigh->arp_queue_len_bytes = 0;
1396	}
1397out:
1398	if (update_isrouter)
1399		neigh_update_is_router(neigh, flags, &notify);
1400	write_unlock_bh(&neigh->lock);
1401
1402	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1403		neigh_update_gc_list(neigh);
1404
1405	if (notify)
1406		neigh_update_notify(neigh, nlmsg_pid);
1407
1408	trace_neigh_update_done(neigh, err);
1409
1410	return err;
1411}
1412
1413int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1414		 u32 flags, u32 nlmsg_pid)
1415{
1416	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1417}
1418EXPORT_SYMBOL(neigh_update);
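/* Editorial note on typical flag combinations passed to neigh_update():
 *
 *	administrative replace via netlink (see neigh_add() below):
 *		NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
 *		NEIGH_UPDATE_F_OVERRIDE_ISROUTER
 *
 *	entry learned passively from a received ARP request / NS
 *	(see neigh_event_ns() below):
 *		NEIGH_UPDATE_F_OVERRIDE with new state NUD_STALE
 *
 * Protocol code that merely suspects a changed address would instead pass
 * NEIGH_UPDATE_F_WEAK_OVERRIDE, so a connected entry is only marked stale
 * rather than having its lladdr rewritten.
 */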
1419
1420/* Update the neigh to listen temporarily for probe responses, even if it is
1421 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1422 */
1423void __neigh_set_probe_once(struct neighbour *neigh)
1424{
1425	if (neigh->dead)
1426		return;
1427	neigh->updated = jiffies;
1428	if (!(neigh->nud_state & NUD_FAILED))
1429		return;
1430	neigh->nud_state = NUD_INCOMPLETE;
1431	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1432	neigh_add_timer(neigh,
1433			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1434}
1435EXPORT_SYMBOL(__neigh_set_probe_once);
1436
1437struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1438				 u8 *lladdr, void *saddr,
1439				 struct net_device *dev)
1440{
1441	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1442						 lladdr || !dev->addr_len);
1443	if (neigh)
1444		neigh_update(neigh, lladdr, NUD_STALE,
1445			     NEIGH_UPDATE_F_OVERRIDE, 0);
1446	return neigh;
1447}
1448EXPORT_SYMBOL(neigh_event_ns);
1449
1450/* called with read_lock_bh(&n->lock); */
1451static void neigh_hh_init(struct neighbour *n)
1452{
1453	struct net_device *dev = n->dev;
1454	__be16 prot = n->tbl->protocol;
1455	struct hh_cache	*hh = &n->hh;
1456
1457	write_lock_bh(&n->lock);
1458
1459	/* Only one thread can come in here and initialize the
1460	 * hh_cache entry.
1461	 */
1462	if (!hh->hh_len)
1463		dev->header_ops->cache(n, hh, prot);
1464
1465	write_unlock_bh(&n->lock);
1466}
1467
1468/* Slow and careful. */
1469
1470int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1471{
1472	int rc = 0;
1473
1474	if (!neigh_event_send(neigh, skb)) {
1475		int err;
1476		struct net_device *dev = neigh->dev;
1477		unsigned int seq;
1478
1479		if (dev->header_ops->cache && !neigh->hh.hh_len)
1480			neigh_hh_init(neigh);
1481
1482		do {
1483			__skb_pull(skb, skb_network_offset(skb));
1484			seq = read_seqbegin(&neigh->ha_lock);
1485			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1486					      neigh->ha, NULL, skb->len);
1487		} while (read_seqretry(&neigh->ha_lock, seq));
1488
1489		if (err >= 0)
1490			rc = dev_queue_xmit(skb);
1491		else
1492			goto out_kfree_skb;
1493	}
1494out:
1495	return rc;
1496out_kfree_skb:
1497	rc = -EINVAL;
1498	kfree_skb(skb);
1499	goto out;
1500}
1501EXPORT_SYMBOL(neigh_resolve_output);
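/* Editorial note: the read_seqbegin()/read_seqretry() loop above retries
 * dev_hard_header() whenever neigh->ha is updated concurrently (the writer is
 * the write_seqlock(&neigh->ha_lock) section in __neigh_update()), so a frame
 * can never go out with a half-copied link-layer address.
 */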
1502
1503/* As fast as possible without hh cache */
1504
1505int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1506{
1507	struct net_device *dev = neigh->dev;
1508	unsigned int seq;
1509	int err;
1510
1511	do {
1512		__skb_pull(skb, skb_network_offset(skb));
1513		seq = read_seqbegin(&neigh->ha_lock);
1514		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1515				      neigh->ha, NULL, skb->len);
1516	} while (read_seqretry(&neigh->ha_lock, seq));
1517
1518	if (err >= 0)
1519		err = dev_queue_xmit(skb);
1520	else {
1521		err = -EINVAL;
1522		kfree_skb(skb);
1523	}
1524	return err;
1525}
1526EXPORT_SYMBOL(neigh_connected_output);
1527
1528int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1529{
1530	return dev_queue_xmit(skb);
1531}
1532EXPORT_SYMBOL(neigh_direct_output);
1533
1534static void neigh_proxy_process(struct timer_list *t)
1535{
1536	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1537	long sched_next = 0;
1538	unsigned long now = jiffies;
1539	struct sk_buff *skb, *n;
1540
1541	spin_lock(&tbl->proxy_queue.lock);
1542
1543	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1544		long tdif = NEIGH_CB(skb)->sched_next - now;
1545
1546		if (tdif <= 0) {
1547			struct net_device *dev = skb->dev;
1548
1549			__skb_unlink(skb, &tbl->proxy_queue);
1550			if (tbl->proxy_redo && netif_running(dev)) {
1551				rcu_read_lock();
1552				tbl->proxy_redo(skb);
1553				rcu_read_unlock();
1554			} else {
1555				kfree_skb(skb);
1556			}
1557
1558			dev_put(dev);
1559		} else if (!sched_next || tdif < sched_next)
1560			sched_next = tdif;
1561	}
1562	del_timer(&tbl->proxy_timer);
1563	if (sched_next)
1564		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1565	spin_unlock(&tbl->proxy_queue.lock);
1566}
1567
1568void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1569		    struct sk_buff *skb)
1570{
1571	unsigned long now = jiffies;
1572
1573	unsigned long sched_next = now + (prandom_u32() %
1574					  NEIGH_VAR(p, PROXY_DELAY));
1575
1576	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1577		kfree_skb(skb);
1578		return;
1579	}
1580
1581	NEIGH_CB(skb)->sched_next = sched_next;
1582	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1583
1584	spin_lock(&tbl->proxy_queue.lock);
1585	if (del_timer(&tbl->proxy_timer)) {
1586		if (time_before(tbl->proxy_timer.expires, sched_next))
1587			sched_next = tbl->proxy_timer.expires;
1588	}
1589	skb_dst_drop(skb);
1590	dev_hold(skb->dev);
1591	__skb_queue_tail(&tbl->proxy_queue, skb);
1592	mod_timer(&tbl->proxy_timer, sched_next);
1593	spin_unlock(&tbl->proxy_queue.lock);
1594}
1595EXPORT_SYMBOL(pneigh_enqueue);
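/* Editorial note: the random delay chosen above is uniform in
 * [0, proxy_delay); for ARP, proxy_delay defaults to (8 * HZ) / 10, i.e.
 * 0.8 seconds (an assumption based on the usual arp_tbl defaults), so several
 * proxies on the same segment are unlikely to answer simultaneously.
 */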
1596
1597static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1598						      struct net *net, int ifindex)
1599{
1600	struct neigh_parms *p;
1601
1602	list_for_each_entry(p, &tbl->parms_list, list) {
1603		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1604		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1605			return p;
1606	}
1607
1608	return NULL;
1609}
1610
1611struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1612				      struct neigh_table *tbl)
1613{
1614	struct neigh_parms *p;
1615	struct net *net = dev_net(dev);
1616	const struct net_device_ops *ops = dev->netdev_ops;
1617
1618	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1619	if (p) {
1620		p->tbl		  = tbl;
1621		refcount_set(&p->refcnt, 1);
1622		p->reachable_time =
1623				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1624		dev_hold(dev);
1625		p->dev = dev;
1626		write_pnet(&p->net, net);
1627		p->sysctl_table = NULL;
1628
1629		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1630			dev_put(dev);
1631			kfree(p);
1632			return NULL;
1633		}
1634
1635		write_lock_bh(&tbl->lock);
1636		list_add(&p->list, &tbl->parms.list);
1637		write_unlock_bh(&tbl->lock);
1638
1639		neigh_parms_data_state_cleanall(p);
1640	}
1641	return p;
1642}
1643EXPORT_SYMBOL(neigh_parms_alloc);
1644
1645static void neigh_rcu_free_parms(struct rcu_head *head)
1646{
1647	struct neigh_parms *parms =
1648		container_of(head, struct neigh_parms, rcu_head);
1649
1650	neigh_parms_put(parms);
1651}
1652
1653void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1654{
1655	if (!parms || parms == &tbl->parms)
1656		return;
1657	write_lock_bh(&tbl->lock);
1658	list_del(&parms->list);
1659	parms->dead = 1;
1660	write_unlock_bh(&tbl->lock);
1661	if (parms->dev)
1662		dev_put(parms->dev);
1663	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1664}
1665EXPORT_SYMBOL(neigh_parms_release);
1666
1667static void neigh_parms_destroy(struct neigh_parms *parms)
1668{
1669	kfree(parms);
1670}
1671
1672static struct lock_class_key neigh_table_proxy_queue_class;
1673
1674static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1675
1676void neigh_table_init(int index, struct neigh_table *tbl)
1677{
1678	unsigned long now = jiffies;
1679	unsigned long phsize;
1680
1681	INIT_LIST_HEAD(&tbl->parms_list);
1682	INIT_LIST_HEAD(&tbl->gc_list);
1683	list_add(&tbl->parms.list, &tbl->parms_list);
1684	write_pnet(&tbl->parms.net, &init_net);
1685	refcount_set(&tbl->parms.refcnt, 1);
1686	tbl->parms.reachable_time =
1687			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1688
1689	tbl->stats = alloc_percpu(struct neigh_statistics);
1690	if (!tbl->stats)
1691		panic("cannot create neighbour cache statistics");
1692
1693#ifdef CONFIG_PROC_FS
1694	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1695			      &neigh_stat_seq_ops, tbl))
1696		panic("cannot create neighbour proc dir entry");
1697#endif
1698
1699	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1700
1701	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1702	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1703
1704	if (!tbl->nht || !tbl->phash_buckets)
1705		panic("cannot allocate neighbour cache hashes");
1706
1707	if (!tbl->entry_size)
1708		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1709					tbl->key_len, NEIGH_PRIV_ALIGN);
1710	else
1711		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1712
1713	rwlock_init(&tbl->lock);
1714	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1715	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1716			tbl->parms.reachable_time);
1717	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1718	skb_queue_head_init_class(&tbl->proxy_queue,
1719			&neigh_table_proxy_queue_class);
1720
1721	tbl->last_flush = now;
1722	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1723
1724	neigh_tables[index] = tbl;
1725}
1726EXPORT_SYMBOL(neigh_table_init);
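/* Editorial example: each protocol registers its table once at init time; the
 * IPv4 ARP code does (paraphrased from net/ipv4/arp.c):
 *
 *	void __init arp_init(void)
 *	{
 *		neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *		...
 *	}
 *
 * and IPv6 neighbour discovery similarly calls
 * neigh_table_init(NEIGH_ND_TABLE, &nd_tbl).
 */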
1727
1728int neigh_table_clear(int index, struct neigh_table *tbl)
1729{
1730	neigh_tables[index] = NULL;
1731	/* It is not clean... Fix it to unload IPv6 module safely */
1732	cancel_delayed_work_sync(&tbl->gc_work);
1733	del_timer_sync(&tbl->proxy_timer);
1734	pneigh_queue_purge(&tbl->proxy_queue);
1735	neigh_ifdown(tbl, NULL);
1736	if (atomic_read(&tbl->entries))
1737		pr_crit("neighbour leakage\n");
1738
1739	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1740		 neigh_hash_free_rcu);
1741	tbl->nht = NULL;
1742
1743	kfree(tbl->phash_buckets);
1744	tbl->phash_buckets = NULL;
1745
1746	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1747
1748	free_percpu(tbl->stats);
1749	tbl->stats = NULL;
1750
1751	return 0;
1752}
1753EXPORT_SYMBOL(neigh_table_clear);
1754
1755static struct neigh_table *neigh_find_table(int family)
1756{
1757	struct neigh_table *tbl = NULL;
1758
1759	switch (family) {
1760	case AF_INET:
1761		tbl = neigh_tables[NEIGH_ARP_TABLE];
1762		break;
1763	case AF_INET6:
1764		tbl = neigh_tables[NEIGH_ND_TABLE];
1765		break;
1766	case AF_DECnet:
1767		tbl = neigh_tables[NEIGH_DN_TABLE];
1768		break;
1769	}
1770
1771	return tbl;
1772}
1773
1774const struct nla_policy nda_policy[NDA_MAX+1] = {
1775	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1776	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1777	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1778	[NDA_PROBES]		= { .type = NLA_U32 },
1779	[NDA_VLAN]		= { .type = NLA_U16 },
1780	[NDA_PORT]		= { .type = NLA_U16 },
1781	[NDA_VNI]		= { .type = NLA_U32 },
1782	[NDA_IFINDEX]		= { .type = NLA_U32 },
1783	[NDA_MASTER]		= { .type = NLA_U32 },
1784	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1785};
1786
1787static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1788			struct netlink_ext_ack *extack)
1789{
1790	struct net *net = sock_net(skb->sk);
1791	struct ndmsg *ndm;
1792	struct nlattr *dst_attr;
1793	struct neigh_table *tbl;
1794	struct neighbour *neigh;
1795	struct net_device *dev = NULL;
1796	int err = -EINVAL;
1797
1798	ASSERT_RTNL();
1799	if (nlmsg_len(nlh) < sizeof(*ndm))
1800		goto out;
1801
1802	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1803	if (!dst_attr) {
1804		NL_SET_ERR_MSG(extack, "Network address not specified");
1805		goto out;
1806	}
1807
1808	ndm = nlmsg_data(nlh);
1809	if (ndm->ndm_ifindex) {
1810		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1811		if (dev == NULL) {
1812			err = -ENODEV;
1813			goto out;
1814		}
1815	}
1816
1817	tbl = neigh_find_table(ndm->ndm_family);
1818	if (tbl == NULL)
1819		return -EAFNOSUPPORT;
1820
1821	if (nla_len(dst_attr) < (int)tbl->key_len) {
1822		NL_SET_ERR_MSG(extack, "Invalid network address");
1823		goto out;
1824	}
1825
1826	if (ndm->ndm_flags & NTF_PROXY) {
1827		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1828		goto out;
1829	}
1830
1831	if (dev == NULL)
1832		goto out;
1833
1834	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1835	if (neigh == NULL) {
1836		err = -ENOENT;
1837		goto out;
1838	}
1839
1840	err = __neigh_update(neigh, NULL, NUD_FAILED,
1841			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1842			     NETLINK_CB(skb).portid, extack);
1843	write_lock_bh(&tbl->lock);
1844	neigh_release(neigh);
1845	neigh_remove_one(neigh, tbl);
1846	write_unlock_bh(&tbl->lock);
1847
1848out:
1849	return err;
1850}
1851
1852static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1853		     struct netlink_ext_ack *extack)
1854{
1855	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1856		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1857	struct net *net = sock_net(skb->sk);
1858	struct ndmsg *ndm;
1859	struct nlattr *tb[NDA_MAX+1];
1860	struct neigh_table *tbl;
1861	struct net_device *dev = NULL;
1862	struct neighbour *neigh;
1863	void *dst, *lladdr;
1864	u8 protocol = 0;
1865	int err;
1866
1867	ASSERT_RTNL();
1868	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1869				     nda_policy, extack);
1870	if (err < 0)
1871		goto out;
1872
1873	err = -EINVAL;
1874	if (!tb[NDA_DST]) {
1875		NL_SET_ERR_MSG(extack, "Network address not specified");
1876		goto out;
1877	}
1878
1879	ndm = nlmsg_data(nlh);
1880	if (ndm->ndm_ifindex) {
1881		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1882		if (dev == NULL) {
1883			err = -ENODEV;
1884			goto out;
1885		}
1886
1887		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1888			NL_SET_ERR_MSG(extack, "Invalid link address");
1889			goto out;
1890		}
1891	}
1892
1893	tbl = neigh_find_table(ndm->ndm_family);
1894	if (tbl == NULL)
1895		return -EAFNOSUPPORT;
1896
1897	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1898		NL_SET_ERR_MSG(extack, "Invalid network address");
1899		goto out;
1900	}
1901
1902	dst = nla_data(tb[NDA_DST]);
1903	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1904
1905	if (tb[NDA_PROTOCOL])
1906		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1907
1908	if (ndm->ndm_flags & NTF_PROXY) {
1909		struct pneigh_entry *pn;
1910
1911		err = -ENOBUFS;
1912		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1913		if (pn) {
1914			pn->flags = ndm->ndm_flags;
1915			if (protocol)
1916				pn->protocol = protocol;
1917			err = 0;
1918		}
1919		goto out;
1920	}
1921
1922	if (!dev) {
1923		NL_SET_ERR_MSG(extack, "Device not specified");
1924		goto out;
1925	}
1926
1927	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1928		err = -EINVAL;
1929		goto out;
1930	}
1931
1932	neigh = neigh_lookup(tbl, dst, dev);
1933	if (neigh == NULL) {
1934		bool exempt_from_gc;
1935
1936		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1937			err = -ENOENT;
1938			goto out;
1939		}
1940
1941		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1942				 ndm->ndm_flags & NTF_EXT_LEARNED;
1943		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
 
1944		if (IS_ERR(neigh)) {
1945			err = PTR_ERR(neigh);
1946			goto out;
1947		}
1948	} else {
1949		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1950			err = -EEXIST;
1951			neigh_release(neigh);
1952			goto out;
1953		}
1954
1955		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1956			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1957				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1958	}
1959
1960	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1961		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1962
1963	if (ndm->ndm_flags & NTF_ROUTER)
1964		flags |= NEIGH_UPDATE_F_ISROUTER;
1965
1966	if (ndm->ndm_flags & NTF_USE) {
1967		neigh_event_send(neigh, NULL);
1968		err = 0;
1969	} else
1970		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1971				     NETLINK_CB(skb).portid, extack);
1972
1973	if (protocol)
1974		neigh->protocol = protocol;
1975
1976	neigh_release(neigh);
1977
1978out:
1979	return err;
1980}
1981
1982static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1983{
1984	struct nlattr *nest;
1985
1986	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
1987	if (nest == NULL)
1988		return -ENOBUFS;
1989
1990	if ((parms->dev &&
1991	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1992	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1993	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1994			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1995	    /* approximative value for deprecated QUEUE_LEN (in packets) */
1996	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1997			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1998	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1999	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2000	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2001			NEIGH_VAR(parms, UCAST_PROBES)) ||
2002	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2003			NEIGH_VAR(parms, MCAST_PROBES)) ||
2004	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2005			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2006	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2007			  NDTPA_PAD) ||
2008	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2009			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2010	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2011			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2012	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2013			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2014	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2015			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2016	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2017			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2018	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2019			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2020	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2021			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2022		goto nla_put_failure;
2023	return nla_nest_end(skb, nest);
2024
2025nla_put_failure:
2026	nla_nest_cancel(skb, nest);
2027	return -EMSGSIZE;
2028}
2029
2030static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2031			      u32 pid, u32 seq, int type, int flags)
2032{
2033	struct nlmsghdr *nlh;
2034	struct ndtmsg *ndtmsg;
2035
2036	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2037	if (nlh == NULL)
2038		return -EMSGSIZE;
2039
2040	ndtmsg = nlmsg_data(nlh);
2041
2042	read_lock_bh(&tbl->lock);
2043	ndtmsg->ndtm_family = tbl->family;
2044	ndtmsg->ndtm_pad1   = 0;
2045	ndtmsg->ndtm_pad2   = 0;
2046
2047	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2048	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2049	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2050	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2051	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2052		goto nla_put_failure;
2053	{
2054		unsigned long now = jiffies;
2055		unsigned int flush_delta = now - tbl->last_flush;
2056		unsigned int rand_delta = now - tbl->last_rand;
2057		struct neigh_hash_table *nht;
2058		struct ndt_config ndc = {
2059			.ndtc_key_len		= tbl->key_len,
2060			.ndtc_entry_size	= tbl->entry_size,
2061			.ndtc_entries		= atomic_read(&tbl->entries),
2062			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2063			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2064			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
2065		};
2066
2067		rcu_read_lock_bh();
2068		nht = rcu_dereference_bh(tbl->nht);
2069		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2070		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2071		rcu_read_unlock_bh();
2072
2073		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2074			goto nla_put_failure;
2075	}
2076
2077	{
2078		int cpu;
2079		struct ndt_stats ndst;
2080
2081		memset(&ndst, 0, sizeof(ndst));
2082
2083		for_each_possible_cpu(cpu) {
2084			struct neigh_statistics	*st;
2085
2086			st = per_cpu_ptr(tbl->stats, cpu);
2087			ndst.ndts_allocs		+= st->allocs;
2088			ndst.ndts_destroys		+= st->destroys;
2089			ndst.ndts_hash_grows		+= st->hash_grows;
2090			ndst.ndts_res_failed		+= st->res_failed;
2091			ndst.ndts_lookups		+= st->lookups;
2092			ndst.ndts_hits			+= st->hits;
2093			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
2094			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
2095			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
2096			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
2097			ndst.ndts_table_fulls		+= st->table_fulls;
2098		}
2099
2100		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2101				  NDTA_PAD))
2102			goto nla_put_failure;
2103	}
2104
2105	BUG_ON(tbl->parms.dev);
2106	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2107		goto nla_put_failure;
2108
2109	read_unlock_bh(&tbl->lock);
2110	nlmsg_end(skb, nlh);
2111	return 0;
2112
2113nla_put_failure:
2114	read_unlock_bh(&tbl->lock);
2115	nlmsg_cancel(skb, nlh);
2116	return -EMSGSIZE;
2117}
2118
2119static int neightbl_fill_param_info(struct sk_buff *skb,
2120				    struct neigh_table *tbl,
2121				    struct neigh_parms *parms,
2122				    u32 pid, u32 seq, int type,
2123				    unsigned int flags)
2124{
2125	struct ndtmsg *ndtmsg;
2126	struct nlmsghdr *nlh;
2127
2128	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2129	if (nlh == NULL)
2130		return -EMSGSIZE;
2131
2132	ndtmsg = nlmsg_data(nlh);
2133
2134	read_lock_bh(&tbl->lock);
2135	ndtmsg->ndtm_family = tbl->family;
2136	ndtmsg->ndtm_pad1   = 0;
2137	ndtmsg->ndtm_pad2   = 0;
2138
2139	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2140	    neightbl_fill_parms(skb, parms) < 0)
2141		goto errout;
2142
2143	read_unlock_bh(&tbl->lock);
2144	nlmsg_end(skb, nlh);
2145	return 0;
2146errout:
2147	read_unlock_bh(&tbl->lock);
2148	nlmsg_cancel(skb, nlh);
2149	return -EMSGSIZE;
2150}
2151
2152static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2153	[NDTA_NAME]		= { .type = NLA_STRING },
2154	[NDTA_THRESH1]		= { .type = NLA_U32 },
2155	[NDTA_THRESH2]		= { .type = NLA_U32 },
2156	[NDTA_THRESH3]		= { .type = NLA_U32 },
2157	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2158	[NDTA_PARMS]		= { .type = NLA_NESTED },
2159};
2160
2161static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2162	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2163	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2164	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2165	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2166	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2167	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2168	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2169	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2170	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2171	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2172	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2173	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2174	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2175	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2176};
2177
2178static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2179			struct netlink_ext_ack *extack)
2180{
2181	struct net *net = sock_net(skb->sk);
2182	struct neigh_table *tbl;
2183	struct ndtmsg *ndtmsg;
2184	struct nlattr *tb[NDTA_MAX+1];
2185	bool found = false;
2186	int err, tidx;
2187
2188	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2189				     nl_neightbl_policy, extack);
2190	if (err < 0)
2191		goto errout;
2192
2193	if (tb[NDTA_NAME] == NULL) {
2194		err = -EINVAL;
2195		goto errout;
2196	}
2197
2198	ndtmsg = nlmsg_data(nlh);
2199
2200	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2201		tbl = neigh_tables[tidx];
2202		if (!tbl)
2203			continue;
2204		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2205			continue;
2206		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2207			found = true;
2208			break;
2209		}
2210	}
2211
2212	if (!found)
2213		return -ENOENT;
2214
2215	/*
2216	 * We acquire tbl->lock to be nice to the periodic timers and
2217	 * make sure they always see a consistent set of values.
2218	 */
2219	write_lock_bh(&tbl->lock);
2220
2221	if (tb[NDTA_PARMS]) {
2222		struct nlattr *tbp[NDTPA_MAX+1];
2223		struct neigh_parms *p;
2224		int i, ifindex = 0;
2225
2226		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2227						  tb[NDTA_PARMS],
2228						  nl_ntbl_parm_policy, extack);
2229		if (err < 0)
2230			goto errout_tbl_lock;
2231
2232		if (tbp[NDTPA_IFINDEX])
2233			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2234
2235		p = lookup_neigh_parms(tbl, net, ifindex);
2236		if (p == NULL) {
2237			err = -ENOENT;
2238			goto errout_tbl_lock;
2239		}
2240
2241		for (i = 1; i <= NDTPA_MAX; i++) {
2242			if (tbp[i] == NULL)
2243				continue;
2244
2245			switch (i) {
2246			case NDTPA_QUEUE_LEN:
2247				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2248					      nla_get_u32(tbp[i]) *
2249					      SKB_TRUESIZE(ETH_FRAME_LEN));
2250				break;
2251			case NDTPA_QUEUE_LENBYTES:
2252				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2253					      nla_get_u32(tbp[i]));
2254				break;
2255			case NDTPA_PROXY_QLEN:
2256				NEIGH_VAR_SET(p, PROXY_QLEN,
2257					      nla_get_u32(tbp[i]));
2258				break;
2259			case NDTPA_APP_PROBES:
2260				NEIGH_VAR_SET(p, APP_PROBES,
2261					      nla_get_u32(tbp[i]));
2262				break;
2263			case NDTPA_UCAST_PROBES:
2264				NEIGH_VAR_SET(p, UCAST_PROBES,
2265					      nla_get_u32(tbp[i]));
2266				break;
2267			case NDTPA_MCAST_PROBES:
2268				NEIGH_VAR_SET(p, MCAST_PROBES,
2269					      nla_get_u32(tbp[i]));
2270				break;
2271			case NDTPA_MCAST_REPROBES:
2272				NEIGH_VAR_SET(p, MCAST_REPROBES,
2273					      nla_get_u32(tbp[i]));
2274				break;
2275			case NDTPA_BASE_REACHABLE_TIME:
2276				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2277					      nla_get_msecs(tbp[i]));
2278				/* update reachable_time as well, otherwise, the change will
2279				 * only be effective after the next time neigh_periodic_work
2280				 * decides to recompute it (can be multiple minutes)
2281				 */
2282				p->reachable_time =
2283					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2284				break;
2285			case NDTPA_GC_STALETIME:
2286				NEIGH_VAR_SET(p, GC_STALETIME,
2287					      nla_get_msecs(tbp[i]));
2288				break;
2289			case NDTPA_DELAY_PROBE_TIME:
2290				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2291					      nla_get_msecs(tbp[i]));
2292				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2293				break;
2294			case NDTPA_RETRANS_TIME:
2295				NEIGH_VAR_SET(p, RETRANS_TIME,
2296					      nla_get_msecs(tbp[i]));
2297				break;
2298			case NDTPA_ANYCAST_DELAY:
2299				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2300					      nla_get_msecs(tbp[i]));
2301				break;
2302			case NDTPA_PROXY_DELAY:
2303				NEIGH_VAR_SET(p, PROXY_DELAY,
2304					      nla_get_msecs(tbp[i]));
2305				break;
2306			case NDTPA_LOCKTIME:
2307				NEIGH_VAR_SET(p, LOCKTIME,
2308					      nla_get_msecs(tbp[i]));
2309				break;
2310			}
2311		}
2312	}
2313
2314	err = -ENOENT;
2315	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2316	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2317	    !net_eq(net, &init_net))
2318		goto errout_tbl_lock;
2319
2320	if (tb[NDTA_THRESH1])
2321		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2322
2323	if (tb[NDTA_THRESH2])
2324		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2325
2326	if (tb[NDTA_THRESH3])
2327		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2328
2329	if (tb[NDTA_GC_INTERVAL])
2330		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2331
2332	err = 0;
2333
2334errout_tbl_lock:
2335	write_unlock_bh(&tbl->lock);
2336errout:
2337	return err;
2338}
2339
2340static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2341				    struct netlink_ext_ack *extack)
2342{
2343	struct ndtmsg *ndtm;
2344
2345	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2346		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2347		return -EINVAL;
2348	}
2349
2350	ndtm = nlmsg_data(nlh);
2351	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2352		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2353		return -EINVAL;
2354	}
2355
2356	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2357		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2358		return -EINVAL;
2359	}
2360
2361	return 0;
2362}
2363
2364static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2365{
2366	const struct nlmsghdr *nlh = cb->nlh;
2367	struct net *net = sock_net(skb->sk);
2368	int family, tidx, nidx = 0;
2369	int tbl_skip = cb->args[0];
2370	int neigh_skip = cb->args[1];
2371	struct neigh_table *tbl;
2372
2373	if (cb->strict_check) {
2374		int err = neightbl_valid_dump_info(nlh, cb->extack);
2375
2376		if (err < 0)
2377			return err;
2378	}
2379
2380	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2381
2382	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2383		struct neigh_parms *p;
2384
2385		tbl = neigh_tables[tidx];
2386		if (!tbl)
2387			continue;
2388
2389		if (tidx < tbl_skip || (family && tbl->family != family))
2390			continue;
2391
2392		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2393				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2394				       NLM_F_MULTI) < 0)
2395			break;
2396
2397		nidx = 0;
2398		p = list_next_entry(&tbl->parms, list);
2399		list_for_each_entry_from(p, &tbl->parms_list, list) {
2400			if (!net_eq(neigh_parms_net(p), net))
2401				continue;
2402
2403			if (nidx < neigh_skip)
2404				goto next;
2405
2406			if (neightbl_fill_param_info(skb, tbl, p,
2407						     NETLINK_CB(cb->skb).portid,
2408						     nlh->nlmsg_seq,
2409						     RTM_NEWNEIGHTBL,
2410						     NLM_F_MULTI) < 0)
2411				goto out;
2412		next:
2413			nidx++;
2414		}
2415
2416		neigh_skip = 0;
2417	}
2418out:
2419	cb->args[0] = tidx;
2420	cb->args[1] = nidx;
2421
2422	return skb->len;
2423}
2424
2425static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2426			   u32 pid, u32 seq, int type, unsigned int flags)
2427{
2428	unsigned long now = jiffies;
2429	struct nda_cacheinfo ci;
2430	struct nlmsghdr *nlh;
2431	struct ndmsg *ndm;
2432
2433	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2434	if (nlh == NULL)
2435		return -EMSGSIZE;
2436
2437	ndm = nlmsg_data(nlh);
2438	ndm->ndm_family	 = neigh->ops->family;
2439	ndm->ndm_pad1    = 0;
2440	ndm->ndm_pad2    = 0;
2441	ndm->ndm_flags	 = neigh->flags;
2442	ndm->ndm_type	 = neigh->type;
2443	ndm->ndm_ifindex = neigh->dev->ifindex;
2444
2445	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2446		goto nla_put_failure;
2447
2448	read_lock_bh(&neigh->lock);
2449	ndm->ndm_state	 = neigh->nud_state;
2450	if (neigh->nud_state & NUD_VALID) {
2451		char haddr[MAX_ADDR_LEN];
2452
2453		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2454		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2455			read_unlock_bh(&neigh->lock);
2456			goto nla_put_failure;
2457		}
2458	}
2459
2460	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2461	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2462	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2463	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2464	read_unlock_bh(&neigh->lock);
2465
2466	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2467	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2468		goto nla_put_failure;
2469
2470	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2471		goto nla_put_failure;
2472
2473	nlmsg_end(skb, nlh);
2474	return 0;
2475
2476nla_put_failure:
2477	nlmsg_cancel(skb, nlh);
2478	return -EMSGSIZE;
2479}
2480
2481static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2482			    u32 pid, u32 seq, int type, unsigned int flags,
2483			    struct neigh_table *tbl)
2484{
2485	struct nlmsghdr *nlh;
2486	struct ndmsg *ndm;
2487
2488	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2489	if (nlh == NULL)
2490		return -EMSGSIZE;
2491
2492	ndm = nlmsg_data(nlh);
2493	ndm->ndm_family	 = tbl->family;
2494	ndm->ndm_pad1    = 0;
2495	ndm->ndm_pad2    = 0;
2496	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2497	ndm->ndm_type	 = RTN_UNICAST;
2498	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2499	ndm->ndm_state	 = NUD_NONE;
2500
2501	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2502		goto nla_put_failure;
2503
2504	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2505		goto nla_put_failure;
2506
2507	nlmsg_end(skb, nlh);
2508	return 0;
2509
2510nla_put_failure:
2511	nlmsg_cancel(skb, nlh);
2512	return -EMSGSIZE;
2513}
2514
2515static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2516{
2517	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2518	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2519}
2520
2521static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2522{
2523	struct net_device *master;
2524
2525	if (!master_idx)
2526		return false;
2527
2528	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2529	if (!master || master->ifindex != master_idx)
2530		return true;
2531
2532	return false;
2533}
2534
2535static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2536{
2537	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2538		return true;
2539
2540	return false;
2541}
2542
2543struct neigh_dump_filter {
2544	int master_idx;
2545	int dev_idx;
2546};
2547
2548static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2549			    struct netlink_callback *cb,
2550			    struct neigh_dump_filter *filter)
2551{
2552	struct net *net = sock_net(skb->sk);
2553	struct neighbour *n;
2554	int rc, h, s_h = cb->args[1];
2555	int idx, s_idx = idx = cb->args[2];
2556	struct neigh_hash_table *nht;
2557	unsigned int flags = NLM_F_MULTI;
2558
2559	if (filter->dev_idx || filter->master_idx)
2560		flags |= NLM_F_DUMP_FILTERED;
2561
2562	rcu_read_lock_bh();
2563	nht = rcu_dereference_bh(tbl->nht);
2564
2565	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2566		if (h > s_h)
2567			s_idx = 0;
2568		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2569		     n != NULL;
2570		     n = rcu_dereference_bh(n->next)) {
2571			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2572				goto next;
2573			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2574			    neigh_master_filtered(n->dev, filter->master_idx))
2575				goto next;
2576			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2577					    cb->nlh->nlmsg_seq,
2578					    RTM_NEWNEIGH,
2579					    flags) < 0) {
2580				rc = -1;
2581				goto out;
2582			}
2583next:
2584			idx++;
2585		}
2586	}
2587	rc = skb->len;
2588out:
2589	rcu_read_unlock_bh();
2590	cb->args[1] = h;
2591	cb->args[2] = idx;
2592	return rc;
2593}
2594
2595static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2596			     struct netlink_callback *cb,
2597			     struct neigh_dump_filter *filter)
2598{
2599	struct pneigh_entry *n;
2600	struct net *net = sock_net(skb->sk);
2601	int rc, h, s_h = cb->args[3];
2602	int idx, s_idx = idx = cb->args[4];
2603	unsigned int flags = NLM_F_MULTI;
2604
2605	if (filter->dev_idx || filter->master_idx)
2606		flags |= NLM_F_DUMP_FILTERED;
2607
2608	read_lock_bh(&tbl->lock);
2609
2610	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2611		if (h > s_h)
2612			s_idx = 0;
2613		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2614			if (idx < s_idx || pneigh_net(n) != net)
2615				goto next;
2616			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2617			    neigh_master_filtered(n->dev, filter->master_idx))
2618				goto next;
2619			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2620					    cb->nlh->nlmsg_seq,
2621					    RTM_NEWNEIGH, flags, tbl) < 0) {
2622				read_unlock_bh(&tbl->lock);
2623				rc = -1;
2624				goto out;
2625			}
2626		next:
2627			idx++;
2628		}
2629	}
2630
2631	read_unlock_bh(&tbl->lock);
2632	rc = skb->len;
2633out:
2634	cb->args[3] = h;
2635	cb->args[4] = idx;
2636	return rc;
2637
2638}
2639
2640static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2641				bool strict_check,
2642				struct neigh_dump_filter *filter,
2643				struct netlink_ext_ack *extack)
2644{
2645	struct nlattr *tb[NDA_MAX + 1];
2646	int err, i;
2647
2648	if (strict_check) {
2649		struct ndmsg *ndm;
2650
2651		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2652			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2653			return -EINVAL;
2654		}
2655
2656		ndm = nlmsg_data(nlh);
2657		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2658		    ndm->ndm_state || ndm->ndm_type) {
2659			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2660			return -EINVAL;
2661		}
2662
2663		if (ndm->ndm_flags & ~NTF_PROXY) {
2664			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2665			return -EINVAL;
2666		}
2667
2668		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2669						    tb, NDA_MAX, nda_policy,
2670						    extack);
2671	} else {
2672		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2673					     NDA_MAX, nda_policy, extack);
2674	}
2675	if (err < 0)
2676		return err;
2677
2678	for (i = 0; i <= NDA_MAX; ++i) {
2679		if (!tb[i])
2680			continue;
2681
2682		/* all new attributes should require strict_check */
2683		switch (i) {
2684		case NDA_IFINDEX:
2685			filter->dev_idx = nla_get_u32(tb[i]);
2686			break;
2687		case NDA_MASTER:
2688			filter->master_idx = nla_get_u32(tb[i]);
2689			break;
2690		default:
2691			if (strict_check) {
2692				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2693				return -EINVAL;
2694			}
2695		}
2696	}
2697
2698	return 0;
2699}
2700
2701static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2702{
2703	const struct nlmsghdr *nlh = cb->nlh;
2704	struct neigh_dump_filter filter = {};
2705	struct neigh_table *tbl;
2706	int t, family, s_t;
2707	int proxy = 0;
2708	int err;
2709
2710	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2711
2712	/* check for full ndmsg structure presence, family member is
2713	 * the same for both structures
2714	 */
2715	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2716	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2717		proxy = 1;
2718
2719	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2720	if (err < 0 && cb->strict_check)
2721		return err;
2722
2723	s_t = cb->args[0];
2724
2725	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2726		tbl = neigh_tables[t];
2727
2728		if (!tbl)
2729			continue;
2730		if (t < s_t || (family && tbl->family != family))
2731			continue;
2732		if (t > s_t)
2733			memset(&cb->args[1], 0, sizeof(cb->args) -
2734						sizeof(cb->args[0]));
2735		if (proxy)
2736			err = pneigh_dump_table(tbl, skb, cb, &filter);
2737		else
2738			err = neigh_dump_table(tbl, skb, cb, &filter);
2739		if (err < 0)
2740			break;
2741	}
2742
2743	cb->args[0] = t;
2744	return skb->len;
2745}
2746
2747static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2748			       struct neigh_table **tbl,
2749			       void **dst, int *dev_idx, u8 *ndm_flags,
2750			       struct netlink_ext_ack *extack)
2751{
2752	struct nlattr *tb[NDA_MAX + 1];
2753	struct ndmsg *ndm;
2754	int err, i;
2755
2756	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2757		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2758		return -EINVAL;
2759	}
2760
2761	ndm = nlmsg_data(nlh);
2762	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2763	    ndm->ndm_type) {
2764		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2765		return -EINVAL;
2766	}
2767
2768	if (ndm->ndm_flags & ~NTF_PROXY) {
2769		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2770		return -EINVAL;
2771	}
2772
2773	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2774					    NDA_MAX, nda_policy, extack);
2775	if (err < 0)
2776		return err;
2777
2778	*ndm_flags = ndm->ndm_flags;
2779	*dev_idx = ndm->ndm_ifindex;
2780	*tbl = neigh_find_table(ndm->ndm_family);
2781	if (*tbl == NULL) {
2782		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2783		return -EAFNOSUPPORT;
2784	}
2785
2786	for (i = 0; i <= NDA_MAX; ++i) {
2787		if (!tb[i])
2788			continue;
2789
2790		switch (i) {
2791		case NDA_DST:
2792			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2793				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2794				return -EINVAL;
2795			}
2796			*dst = nla_data(tb[i]);
2797			break;
2798		default:
2799			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2800			return -EINVAL;
2801		}
2802	}
2803
2804	return 0;
2805}
2806
2807static inline size_t neigh_nlmsg_size(void)
2808{
2809	return NLMSG_ALIGN(sizeof(struct ndmsg))
2810	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2811	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2812	       + nla_total_size(sizeof(struct nda_cacheinfo))
2813	       + nla_total_size(4)  /* NDA_PROBES */
2814	       + nla_total_size(1); /* NDA_PROTOCOL */
2815}
2816
2817static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2818			   u32 pid, u32 seq)
2819{
2820	struct sk_buff *skb;
2821	int err = 0;
2822
2823	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2824	if (!skb)
2825		return -ENOBUFS;
2826
2827	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2828	if (err) {
2829		kfree_skb(skb);
2830		goto errout;
2831	}
2832
2833	err = rtnl_unicast(skb, net, pid);
2834errout:
2835	return err;
2836}
2837
2838static inline size_t pneigh_nlmsg_size(void)
2839{
2840	return NLMSG_ALIGN(sizeof(struct ndmsg))
2841	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2842	       + nla_total_size(1); /* NDA_PROTOCOL */
2843}
2844
2845static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2846			    u32 pid, u32 seq, struct neigh_table *tbl)
2847{
2848	struct sk_buff *skb;
2849	int err = 0;
2850
2851	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2852	if (!skb)
2853		return -ENOBUFS;
2854
2855	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2856	if (err) {
2857		kfree_skb(skb);
2858		goto errout;
2859	}
2860
2861	err = rtnl_unicast(skb, net, pid);
2862errout:
2863	return err;
2864}
2865
2866static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2867		     struct netlink_ext_ack *extack)
2868{
2869	struct net *net = sock_net(in_skb->sk);
2870	struct net_device *dev = NULL;
2871	struct neigh_table *tbl = NULL;
2872	struct neighbour *neigh;
2873	void *dst = NULL;
2874	u8 ndm_flags = 0;
2875	int dev_idx = 0;
2876	int err;
2877
2878	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2879				  extack);
2880	if (err < 0)
2881		return err;
2882
2883	if (dev_idx) {
2884		dev = __dev_get_by_index(net, dev_idx);
2885		if (!dev) {
2886			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2887			return -ENODEV;
2888		}
2889	}
2890
2891	if (!dst) {
2892		NL_SET_ERR_MSG(extack, "Network address not specified");
2893		return -EINVAL;
2894	}
2895
2896	if (ndm_flags & NTF_PROXY) {
2897		struct pneigh_entry *pn;
2898
2899		pn = pneigh_lookup(tbl, net, dst, dev, 0);
2900		if (!pn) {
2901			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2902			return -ENOENT;
2903		}
2904		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2905					nlh->nlmsg_seq, tbl);
2906	}
2907
2908	if (!dev) {
2909		NL_SET_ERR_MSG(extack, "No device specified");
2910		return -EINVAL;
2911	}
2912
2913	neigh = neigh_lookup(tbl, dst, dev);
2914	if (!neigh) {
2915		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2916		return -ENOENT;
2917	}
2918
2919	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2920			      nlh->nlmsg_seq);
2921
2922	neigh_release(neigh);
2923
2924	return err;
2925}
2926
2927void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2928{
2929	int chain;
2930	struct neigh_hash_table *nht;
2931
2932	rcu_read_lock_bh();
2933	nht = rcu_dereference_bh(tbl->nht);
2934
2935	read_lock(&tbl->lock); /* avoid resizes */
2936	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2937		struct neighbour *n;
2938
2939		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2940		     n != NULL;
2941		     n = rcu_dereference_bh(n->next))
2942			cb(n, cookie);
2943	}
2944	read_unlock(&tbl->lock);
2945	rcu_read_unlock_bh();
2946}
2947EXPORT_SYMBOL(neigh_for_each);
2948
2949/* The tbl->lock must be held as a writer and BH disabled. */
2950void __neigh_for_each_release(struct neigh_table *tbl,
2951			      int (*cb)(struct neighbour *))
2952{
2953	int chain;
2954	struct neigh_hash_table *nht;
2955
2956	nht = rcu_dereference_protected(tbl->nht,
2957					lockdep_is_held(&tbl->lock));
2958	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2959		struct neighbour *n;
2960		struct neighbour __rcu **np;
2961
2962		np = &nht->hash_buckets[chain];
2963		while ((n = rcu_dereference_protected(*np,
2964					lockdep_is_held(&tbl->lock))) != NULL) {
2965			int release;
2966
2967			write_lock(&n->lock);
2968			release = cb(n);
2969			if (release) {
2970				rcu_assign_pointer(*np,
2971					rcu_dereference_protected(n->next,
2972						lockdep_is_held(&tbl->lock)));
2973				neigh_mark_dead(n);
2974			} else
2975				np = &n->next;
2976			write_unlock(&n->lock);
2977			if (release)
2978				neigh_cleanup_and_release(n);
2979		}
2980	}
2981}
2982EXPORT_SYMBOL(__neigh_for_each_release);
2983
2984int neigh_xmit(int index, struct net_device *dev,
2985	       const void *addr, struct sk_buff *skb)
2986{
2987	int err = -EAFNOSUPPORT;
2988	if (likely(index < NEIGH_NR_TABLES)) {
2989		struct neigh_table *tbl;
2990		struct neighbour *neigh;
2991
2992		tbl = neigh_tables[index];
2993		if (!tbl)
2994			goto out;
2995		rcu_read_lock_bh();
2996		if (index == NEIGH_ARP_TABLE) {
2997			u32 key = *((u32 *)addr);
2998
2999			neigh = __ipv4_neigh_lookup_noref(dev, key);
3000		} else {
3001			neigh = __neigh_lookup_noref(tbl, addr, dev);
3002		}
3003		if (!neigh)
3004			neigh = __neigh_create(tbl, addr, dev, false);
3005		err = PTR_ERR(neigh);
3006		if (IS_ERR(neigh)) {
3007			rcu_read_unlock_bh();
3008			goto out_kfree_skb;
3009		}
3010		err = neigh->output(neigh, skb);
3011		rcu_read_unlock_bh();
3012	}
3013	else if (index == NEIGH_LINK_TABLE) {
3014		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3015				      addr, NULL, skb->len);
3016		if (err < 0)
3017			goto out_kfree_skb;
3018		err = dev_queue_xmit(skb);
3019	}
3020out:
3021	return err;
3022out_kfree_skb:
3023	kfree_skb(skb);
3024	goto out;
3025}
3026EXPORT_SYMBOL(neigh_xmit);
3027
3028#ifdef CONFIG_PROC_FS
3029
3030static struct neighbour *neigh_get_first(struct seq_file *seq)
3031{
3032	struct neigh_seq_state *state = seq->private;
3033	struct net *net = seq_file_net(seq);
3034	struct neigh_hash_table *nht = state->nht;
3035	struct neighbour *n = NULL;
3036	int bucket;
3037
3038	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3039	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3040		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3041
3042		while (n) {
3043			if (!net_eq(dev_net(n->dev), net))
3044				goto next;
3045			if (state->neigh_sub_iter) {
3046				loff_t fakep = 0;
3047				void *v;
3048
3049				v = state->neigh_sub_iter(state, n, &fakep);
3050				if (!v)
3051					goto next;
3052			}
3053			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3054				break;
3055			if (n->nud_state & ~NUD_NOARP)
3056				break;
3057next:
3058			n = rcu_dereference_bh(n->next);
3059		}
3060
3061		if (n)
3062			break;
3063	}
3064	state->bucket = bucket;
3065
3066	return n;
3067}
3068
3069static struct neighbour *neigh_get_next(struct seq_file *seq,
3070					struct neighbour *n,
3071					loff_t *pos)
3072{
3073	struct neigh_seq_state *state = seq->private;
3074	struct net *net = seq_file_net(seq);
3075	struct neigh_hash_table *nht = state->nht;
3076
3077	if (state->neigh_sub_iter) {
3078		void *v = state->neigh_sub_iter(state, n, pos);
3079		if (v)
3080			return n;
3081	}
3082	n = rcu_dereference_bh(n->next);
3083
3084	while (1) {
3085		while (n) {
3086			if (!net_eq(dev_net(n->dev), net))
3087				goto next;
3088			if (state->neigh_sub_iter) {
3089				void *v = state->neigh_sub_iter(state, n, pos);
3090				if (v)
3091					return n;
3092				goto next;
3093			}
3094			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3095				break;
3096
3097			if (n->nud_state & ~NUD_NOARP)
3098				break;
3099next:
3100			n = rcu_dereference_bh(n->next);
3101		}
3102
3103		if (n)
3104			break;
3105
3106		if (++state->bucket >= (1 << nht->hash_shift))
3107			break;
3108
3109		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3110	}
3111
3112	if (n && pos)
3113		--(*pos);
3114	return n;
3115}
3116
3117static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3118{
3119	struct neighbour *n = neigh_get_first(seq);
3120
3121	if (n) {
3122		--(*pos);
3123		while (*pos) {
3124			n = neigh_get_next(seq, n, pos);
3125			if (!n)
3126				break;
3127		}
3128	}
3129	return *pos ? NULL : n;
3130}
3131
3132static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3133{
3134	struct neigh_seq_state *state = seq->private;
3135	struct net *net = seq_file_net(seq);
3136	struct neigh_table *tbl = state->tbl;
3137	struct pneigh_entry *pn = NULL;
3138	int bucket = state->bucket;
3139
3140	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3141	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3142		pn = tbl->phash_buckets[bucket];
3143		while (pn && !net_eq(pneigh_net(pn), net))
3144			pn = pn->next;
3145		if (pn)
3146			break;
3147	}
3148	state->bucket = bucket;
3149
3150	return pn;
3151}
3152
3153static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3154					    struct pneigh_entry *pn,
3155					    loff_t *pos)
3156{
3157	struct neigh_seq_state *state = seq->private;
3158	struct net *net = seq_file_net(seq);
3159	struct neigh_table *tbl = state->tbl;
3160
3161	do {
3162		pn = pn->next;
3163	} while (pn && !net_eq(pneigh_net(pn), net));
3164
3165	while (!pn) {
3166		if (++state->bucket > PNEIGH_HASHMASK)
3167			break;
3168		pn = tbl->phash_buckets[state->bucket];
3169		while (pn && !net_eq(pneigh_net(pn), net))
3170			pn = pn->next;
3171		if (pn)
3172			break;
3173	}
3174
3175	if (pn && pos)
3176		--(*pos);
3177
3178	return pn;
3179}
3180
3181static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3182{
3183	struct pneigh_entry *pn = pneigh_get_first(seq);
3184
3185	if (pn) {
3186		--(*pos);
3187		while (*pos) {
3188			pn = pneigh_get_next(seq, pn, pos);
3189			if (!pn)
3190				break;
3191		}
3192	}
3193	return *pos ? NULL : pn;
3194}
3195
3196static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3197{
3198	struct neigh_seq_state *state = seq->private;
3199	void *rc;
3200	loff_t idxpos = *pos;
3201
3202	rc = neigh_get_idx(seq, &idxpos);
3203	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3204		rc = pneigh_get_idx(seq, &idxpos);
3205
3206	return rc;
3207}
3208
3209void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3210	__acquires(tbl->lock)
3211	__acquires(rcu_bh)
3212{
3213	struct neigh_seq_state *state = seq->private;
3214
3215	state->tbl = tbl;
3216	state->bucket = 0;
3217	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3218
3219	rcu_read_lock_bh();
3220	state->nht = rcu_dereference_bh(tbl->nht);
3221	read_lock(&tbl->lock);
3222
3223	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3224}
3225EXPORT_SYMBOL(neigh_seq_start);
3226
3227void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3228{
3229	struct neigh_seq_state *state;
3230	void *rc;
3231
3232	if (v == SEQ_START_TOKEN) {
3233		rc = neigh_get_first(seq);
3234		goto out;
3235	}
3236
3237	state = seq->private;
3238	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3239		rc = neigh_get_next(seq, v, NULL);
3240		if (rc)
3241			goto out;
3242		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3243			rc = pneigh_get_first(seq);
3244	} else {
3245		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3246		rc = pneigh_get_next(seq, v, NULL);
3247	}
3248out:
3249	++(*pos);
3250	return rc;
3251}
3252EXPORT_SYMBOL(neigh_seq_next);
3253
3254void neigh_seq_stop(struct seq_file *seq, void *v)
3255	__releases(tbl->lock)
3256	__releases(rcu_bh)
3257{
3258	struct neigh_seq_state *state = seq->private;
3259	struct neigh_table *tbl = state->tbl;
3260
3261	read_unlock(&tbl->lock);
3262	rcu_read_unlock_bh();
3263}
3264EXPORT_SYMBOL(neigh_seq_stop);
3265
3266/* statistics via seq_file */
3267
3268static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3269{
3270	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3271	int cpu;
3272
3273	if (*pos == 0)
3274		return SEQ_START_TOKEN;
3275
3276	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3277		if (!cpu_possible(cpu))
3278			continue;
3279		*pos = cpu+1;
3280		return per_cpu_ptr(tbl->stats, cpu);
3281	}
3282	return NULL;
3283}
3284
3285static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3286{
3287	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3288	int cpu;
3289
3290	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3291		if (!cpu_possible(cpu))
3292			continue;
3293		*pos = cpu+1;
3294		return per_cpu_ptr(tbl->stats, cpu);
3295	}
3296	return NULL;
3297}
3298
3299static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3300{
3301
3302}
3303
3304static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3305{
3306	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3307	struct neigh_statistics *st = v;
3308
3309	if (v == SEQ_START_TOKEN) {
3310		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3311		return 0;
3312	}
3313
3314	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
3315			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
3316		   atomic_read(&tbl->entries),
3317
3318		   st->allocs,
3319		   st->destroys,
3320		   st->hash_grows,
3321
3322		   st->lookups,
3323		   st->hits,
3324
3325		   st->res_failed,
3326
3327		   st->rcv_probes_mcast,
3328		   st->rcv_probes_ucast,
3329
3330		   st->periodic_gc_runs,
3331		   st->forced_gc_runs,
3332		   st->unres_discards,
3333		   st->table_fulls
3334		   );
3335
3336	return 0;
3337}
3338
3339static const struct seq_operations neigh_stat_seq_ops = {
3340	.start	= neigh_stat_seq_start,
3341	.next	= neigh_stat_seq_next,
3342	.stop	= neigh_stat_seq_stop,
3343	.show	= neigh_stat_seq_show,
3344};
3345#endif /* CONFIG_PROC_FS */
3346
3347static void __neigh_notify(struct neighbour *n, int type, int flags,
3348			   u32 pid)
3349{
3350	struct net *net = dev_net(n->dev);
3351	struct sk_buff *skb;
3352	int err = -ENOBUFS;
3353
3354	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3355	if (skb == NULL)
3356		goto errout;
3357
3358	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3359	if (err < 0) {
3360		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3361		WARN_ON(err == -EMSGSIZE);
3362		kfree_skb(skb);
3363		goto errout;
3364	}
3365	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3366	return;
3367errout:
3368	if (err < 0)
3369		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3370}
3371
3372void neigh_app_ns(struct neighbour *n)
3373{
3374	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3375}
3376EXPORT_SYMBOL(neigh_app_ns);
3377
3378#ifdef CONFIG_SYSCTL
3379static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3380
3381static int proc_unres_qlen(struct ctl_table *ctl, int write,
3382			   void __user *buffer, size_t *lenp, loff_t *ppos)
3383{
3384	int size, ret;
3385	struct ctl_table tmp = *ctl;
3386
3387	tmp.extra1 = SYSCTL_ZERO;
3388	tmp.extra2 = &unres_qlen_max;
3389	tmp.data = &size;
3390
3391	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3392	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3393
3394	if (write && !ret)
3395		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3396	return ret;
3397}
3398
3399static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3400						   int family)
3401{
3402	switch (family) {
3403	case AF_INET:
3404		return __in_dev_arp_parms_get_rcu(dev);
3405	case AF_INET6:
3406		return __in6_dev_nd_parms_get_rcu(dev);
3407	}
3408	return NULL;
3409}
3410
3411static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3412				  int index)
3413{
3414	struct net_device *dev;
3415	int family = neigh_parms_family(p);
3416
3417	rcu_read_lock();
3418	for_each_netdev_rcu(net, dev) {
3419		struct neigh_parms *dst_p =
3420				neigh_get_dev_parms_rcu(dev, family);
3421
3422		if (dst_p && !test_bit(index, dst_p->data_state))
3423			dst_p->data[index] = p->data[index];
3424	}
3425	rcu_read_unlock();
3426}
3427
3428static void neigh_proc_update(struct ctl_table *ctl, int write)
3429{
3430	struct net_device *dev = ctl->extra1;
3431	struct neigh_parms *p = ctl->extra2;
3432	struct net *net = neigh_parms_net(p);
3433	int index = (int *) ctl->data - p->data;
3434
3435	if (!write)
3436		return;
3437
3438	set_bit(index, p->data_state);
3439	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3440		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3441	if (!dev) /* NULL dev means this is default value */
3442		neigh_copy_dflt_parms(net, p, index);
3443}
3444
3445static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3446					   void __user *buffer,
3447					   size_t *lenp, loff_t *ppos)
3448{
3449	struct ctl_table tmp = *ctl;
3450	int ret;
3451
3452	tmp.extra1 = SYSCTL_ZERO;
3453	tmp.extra2 = SYSCTL_INT_MAX;
3454
3455	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3456	neigh_proc_update(ctl, write);
3457	return ret;
3458}
3459
3460int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3461			void __user *buffer, size_t *lenp, loff_t *ppos)
3462{
3463	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3464
3465	neigh_proc_update(ctl, write);
3466	return ret;
3467}
3468EXPORT_SYMBOL(neigh_proc_dointvec);
3469
3470int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3471				void __user *buffer,
3472				size_t *lenp, loff_t *ppos)
3473{
3474	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3475
3476	neigh_proc_update(ctl, write);
3477	return ret;
3478}
3479EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3480
3481static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3482					      void __user *buffer,
3483					      size_t *lenp, loff_t *ppos)
3484{
3485	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3486
3487	neigh_proc_update(ctl, write);
3488	return ret;
3489}
3490
3491int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3492				   void __user *buffer,
3493				   size_t *lenp, loff_t *ppos)
3494{
3495	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3496
3497	neigh_proc_update(ctl, write);
3498	return ret;
3499}
3500EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3501
3502static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3503					  void __user *buffer,
3504					  size_t *lenp, loff_t *ppos)
3505{
3506	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3507
3508	neigh_proc_update(ctl, write);
3509	return ret;
3510}
3511
3512static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3513					  void __user *buffer,
3514					  size_t *lenp, loff_t *ppos)
3515{
3516	struct neigh_parms *p = ctl->extra2;
3517	int ret;
3518
3519	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3520		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3521	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3522		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3523	else
3524		ret = -1;
3525
3526	if (write && ret == 0) {
3527		/* update reachable_time as well, otherwise, the change will
3528		 * only be effective after the next time neigh_periodic_work
3529		 * decides to recompute it
3530		 */
3531		p->reachable_time =
3532			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3533	}
3534	return ret;
3535}
3536
3537#define NEIGH_PARMS_DATA_OFFSET(index)	\
3538	(&((struct neigh_parms *) 0)->data[index])
3539
3540#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3541	[NEIGH_VAR_ ## attr] = { \
3542		.procname	= name, \
3543		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3544		.maxlen		= sizeof(int), \
3545		.mode		= mval, \
3546		.proc_handler	= proc, \
3547	}
3548
3549#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3550	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3551
3552#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3553	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3554
3555#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3556	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3557
3558#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3559	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3560
3561#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3562	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3563
3564#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3565	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3566
3567static struct neigh_sysctl_table {
3568	struct ctl_table_header *sysctl_header;
3569	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3570} neigh_sysctl_template __read_mostly = {
3571	.neigh_vars = {
3572		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3573		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3574		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3575		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3576		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3577		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3578		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3579		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3580		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3581		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3582		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3583		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3584		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3585		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3586		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3587		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3588		[NEIGH_VAR_GC_INTERVAL] = {
3589			.procname	= "gc_interval",
3590			.maxlen		= sizeof(int),
3591			.mode		= 0644,
3592			.proc_handler	= proc_dointvec_jiffies,
3593		},
3594		[NEIGH_VAR_GC_THRESH1] = {
3595			.procname	= "gc_thresh1",
3596			.maxlen		= sizeof(int),
3597			.mode		= 0644,
3598			.extra1		= SYSCTL_ZERO,
3599			.extra2		= SYSCTL_INT_MAX,
3600			.proc_handler	= proc_dointvec_minmax,
3601		},
3602		[NEIGH_VAR_GC_THRESH2] = {
3603			.procname	= "gc_thresh2",
3604			.maxlen		= sizeof(int),
3605			.mode		= 0644,
3606			.extra1		= SYSCTL_ZERO,
3607			.extra2		= SYSCTL_INT_MAX,
3608			.proc_handler	= proc_dointvec_minmax,
3609		},
3610		[NEIGH_VAR_GC_THRESH3] = {
3611			.procname	= "gc_thresh3",
3612			.maxlen		= sizeof(int),
3613			.mode		= 0644,
3614			.extra1		= SYSCTL_ZERO,
3615			.extra2		= SYSCTL_INT_MAX,
3616			.proc_handler	= proc_dointvec_minmax,
3617		},
3618		{},
3619	},
3620};
3621
3622int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3623			  proc_handler *handler)
3624{
3625	int i;
3626	struct neigh_sysctl_table *t;
3627	const char *dev_name_source;
3628	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3629	char *p_name;
3630
3631	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3632	if (!t)
3633		goto err;
3634
3635	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3636		t->neigh_vars[i].data += (long) p;
3637		t->neigh_vars[i].extra1 = dev;
3638		t->neigh_vars[i].extra2 = p;
3639	}
3640
3641	if (dev) {
3642		dev_name_source = dev->name;
3643		/* Terminate the table early */
3644		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3645		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3646	} else {
3647		struct neigh_table *tbl = p->tbl;
3648		dev_name_source = "default";
3649		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3650		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3651		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3652		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3653	}
3654
3655	if (handler) {
3656		/* RetransTime */
3657		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3658		/* ReachableTime */
3659		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3660		/* RetransTime (in milliseconds)*/
3661		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3662		/* ReachableTime (in milliseconds) */
3663		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3664	} else {
3665		/* Those handlers will update p->reachable_time after
3666		 * base_reachable_time(_ms) is set to ensure the new timer starts being
3667		 * applied after the next neighbour update instead of waiting for
3668		 * neigh_periodic_work to update its value (can be multiple minutes)
3669		 * So any handler that replaces them should do this as well
3670		 */
3671		/* ReachableTime */
3672		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3673			neigh_proc_base_reachable_time;
3674		/* ReachableTime (in milliseconds) */
3675		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3676			neigh_proc_base_reachable_time;
3677	}
3678
3679	/* Don't export sysctls to unprivileged users */
3680	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3681		t->neigh_vars[0].procname = NULL;
3682
3683	switch (neigh_parms_family(p)) {
3684	case AF_INET:
3685	      p_name = "ipv4";
3686	      break;
3687	case AF_INET6:
3688	      p_name = "ipv6";
3689	      break;
3690	default:
3691	      BUG();
3692	}
3693
3694	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3695		p_name, dev_name_source);
3696	t->sysctl_header =
3697		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3698	if (!t->sysctl_header)
3699		goto free;
3700
3701	p->sysctl_table = t;
3702	return 0;
3703
3704free:
3705	kfree(t);
3706err:
3707	return -ENOBUFS;
3708}
3709EXPORT_SYMBOL(neigh_sysctl_register);
3710
3711void neigh_sysctl_unregister(struct neigh_parms *p)
3712{
3713	if (p->sysctl_table) {
3714		struct neigh_sysctl_table *t = p->sysctl_table;
3715		p->sysctl_table = NULL;
3716		unregister_net_sysctl_table(t->sysctl_header);
3717		kfree(t);
3718	}
3719}
3720EXPORT_SYMBOL(neigh_sysctl_unregister);
3721
3722#endif	/* CONFIG_SYSCTL */
3723
3724static int __init neigh_init(void)
3725{
3726	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3727	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3728	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3729
3730	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3731		      0);
3732	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3733
3734	return 0;
3735}
3736
3737subsys_initcall(neigh_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Generic address resolution entity
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	Fixes:
  10 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
  11 *	Harald Welte		Add neighbour cache statistics like rtstat
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/slab.h>
 
  17#include <linux/types.h>
  18#include <linux/kernel.h>
  19#include <linux/module.h>
  20#include <linux/socket.h>
  21#include <linux/netdevice.h>
  22#include <linux/proc_fs.h>
  23#ifdef CONFIG_SYSCTL
  24#include <linux/sysctl.h>
  25#endif
  26#include <linux/times.h>
  27#include <net/net_namespace.h>
  28#include <net/neighbour.h>
  29#include <net/arp.h>
  30#include <net/dst.h>
  31#include <net/sock.h>
  32#include <net/netevent.h>
  33#include <net/netlink.h>
  34#include <linux/rtnetlink.h>
  35#include <linux/random.h>
  36#include <linux/string.h>
  37#include <linux/log2.h>
  38#include <linux/inetdevice.h>
  39#include <net/addrconf.h>
  40
  41#include <trace/events/neigh.h>
  42
 
  43#define NEIGH_DEBUG 1
  44#define neigh_dbg(level, fmt, ...)		\
  45do {						\
  46	if (level <= NEIGH_DEBUG)		\
  47		pr_debug(fmt, ##__VA_ARGS__);	\
  48} while (0)
  49
  50#define PNEIGH_HASHMASK		0xF
  51
  52static void neigh_timer_handler(struct timer_list *t);
  53static void __neigh_notify(struct neighbour *n, int type, int flags,
  54			   u32 pid);
  55static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
  56static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
  57				    struct net_device *dev);
  58
  59#ifdef CONFIG_PROC_FS
  60static const struct seq_operations neigh_stat_seq_ops;
  61#endif
  62
  63static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family)
  64{
  65	int i;
  66
  67	switch (family) {
  68	default:
  69		DEBUG_NET_WARN_ON_ONCE(1);
  70		fallthrough; /* to avoid panic by null-ptr-deref */
  71	case AF_INET:
  72		i = NEIGH_ARP_TABLE;
  73		break;
  74	case AF_INET6:
  75		i = NEIGH_ND_TABLE;
  76		break;
  77	}
  78
  79	return &dev->neighbours[i];
  80}
  81
  82/*
  83   Neighbour hash table buckets are protected with rwlock tbl->lock.
  84
  85   - All the scans/updates to hash buckets MUST be made under this lock.
  86   - NOTHING clever should be made under this lock: no callbacks
  87     to protocol backends, no attempts to send something to network.
  88     It will result in deadlocks, if backend/driver wants to use neighbour
  89     cache.
  90   - If the entry requires some non-trivial actions, increase
  91     its reference count and release table lock.
  92
  93   Neighbour entries are protected:
  94   - with reference count.
  95   - with rwlock neigh->lock
  96
  97   Reference count prevents destruction.
  98
  99   neigh->lock mainly serializes ll address data and its validity state.
 100   However, the same lock is used to protect another entry fields:
 101    - timer
 102    - resolution queue
 103
 104   Again, nothing clever shall be made under neigh->lock,
 105   the most complicated procedure, which we allow is dev->hard_header.
 106   It is supposed, that dev->hard_header is simplistic and does
 107   not make callbacks to neighbour tables.
 108 */
 109
 110static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
 111{
 112	kfree_skb(skb);
 113	return -ENETDOWN;
 114}
 115
 116static void neigh_cleanup_and_release(struct neighbour *neigh)
 117{
 
 
 
 118	trace_neigh_cleanup_and_release(neigh, 0);
 119	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
 120	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 121	neigh_release(neigh);
 122}
 123
 124/*
 125 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 126 * It corresponds to default IPv6 settings and is not overridable,
 127 * because it is really reasonable choice.
 128 */
 129
 130unsigned long neigh_rand_reach_time(unsigned long base)
 131{
 132	return base ? get_random_u32_below(base) + (base >> 1) : 0;
 133}
 134EXPORT_SYMBOL(neigh_rand_reach_time);
 135
 136static void neigh_mark_dead(struct neighbour *n)
 137{
 138	n->dead = 1;
 139	if (!list_empty(&n->gc_list)) {
 140		list_del_init(&n->gc_list);
 141		atomic_dec(&n->tbl->gc_entries);
 142	}
 143	if (!list_empty(&n->managed_list))
 144		list_del_init(&n->managed_list);
 145}
 146
 147static void neigh_update_gc_list(struct neighbour *n)
 148{
 149	bool on_gc_list, exempt_from_gc;
 150
 151	write_lock_bh(&n->tbl->lock);
 152	write_lock(&n->lock);
 153	if (n->dead)
 154		goto out;
 155
 156	/* remove from the gc list if new state is permanent or if neighbor
 157	 * is externally learned; otherwise entry should be on the gc list
 158	 */
 159	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
 160			 n->flags & NTF_EXT_LEARNED;
 161	on_gc_list = !list_empty(&n->gc_list);
 162
 163	if (exempt_from_gc && on_gc_list) {
 164		list_del_init(&n->gc_list);
 165		atomic_dec(&n->tbl->gc_entries);
 166	} else if (!exempt_from_gc && !on_gc_list) {
 167		/* add entries to the tail; cleaning removes from the front */
 168		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 169		atomic_inc(&n->tbl->gc_entries);
 170	}
 171out:
 172	write_unlock(&n->lock);
 173	write_unlock_bh(&n->tbl->lock);
 174}
 175
 176static void neigh_update_managed_list(struct neighbour *n)
 177{
 178	bool on_managed_list, add_to_managed;
 179
 180	write_lock_bh(&n->tbl->lock);
 181	write_lock(&n->lock);
 182	if (n->dead)
 183		goto out;
 184
 185	add_to_managed = n->flags & NTF_MANAGED;
 186	on_managed_list = !list_empty(&n->managed_list);
 187
 188	if (!add_to_managed && on_managed_list)
 189		list_del_init(&n->managed_list);
 190	else if (add_to_managed && !on_managed_list)
 191		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 192out:
 193	write_unlock(&n->lock);
 194	write_unlock_bh(&n->tbl->lock);
 195}
 196
 197static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
 198			       bool *gc_update, bool *managed_update)
 199{
 200	u32 ndm_flags, old_flags = neigh->flags;
 201
 202	if (!(flags & NEIGH_UPDATE_F_ADMIN))
 203		return;
 204
 205	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
 206	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
 207
 208	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
 209		if (ndm_flags & NTF_EXT_LEARNED)
 210			neigh->flags |= NTF_EXT_LEARNED;
 211		else
 212			neigh->flags &= ~NTF_EXT_LEARNED;
 213		*notify = 1;
 214		*gc_update = true;
 215	}
 216	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
 217		if (ndm_flags & NTF_MANAGED)
 218			neigh->flags |= NTF_MANAGED;
 219		else
 220			neigh->flags &= ~NTF_MANAGED;
 221		*notify = 1;
 222		*managed_update = true;
 223	}
 224}
 225
 226bool neigh_remove_one(struct neighbour *n)
 227{
 228	bool retval = false;
 229
 230	write_lock(&n->lock);
 231	if (refcount_read(&n->refcnt) == 1) {
 232		hlist_del_rcu(&n->hash);
 233		hlist_del_rcu(&n->dev_list);
 234		neigh_mark_dead(n);
 235		retval = true;
 236	}
 237	write_unlock(&n->lock);
 238	if (retval)
 239		neigh_cleanup_and_release(n);
 240	return retval;
 241}
 242
 243static int neigh_forced_gc(struct neigh_table *tbl)
 244{
 245	int max_clean = atomic_read(&tbl->gc_entries) -
 246			READ_ONCE(tbl->gc_thresh2);
 247	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
 248	unsigned long tref = jiffies - 5 * HZ;
 249	struct neighbour *n, *tmp;
 250	int shrunk = 0;
 251	int loop = 0;
 252
 253	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 254
 255	write_lock_bh(&tbl->lock);
 256
 257	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
 258		if (refcount_read(&n->refcnt) == 1) {
 259			bool remove = false;
 260
 261			write_lock(&n->lock);
 262			if ((n->nud_state == NUD_FAILED) ||
 263			    (n->nud_state == NUD_NOARP) ||
 264			    (tbl->is_multicast &&
 265			     tbl->is_multicast(n->primary_key)) ||
 266			    !time_in_range(n->updated, tref, jiffies))
 267				remove = true;
 268			write_unlock(&n->lock);
 269
 270			if (remove && neigh_remove_one(n))
 271				shrunk++;
 272			if (shrunk >= max_clean)
 273				break;
 274			if (++loop == 16) {
 275				if (ktime_get_ns() > tmax)
 276					goto unlock;
 277				loop = 0;
 278			}
 279		}
 280	}
 281
 282	WRITE_ONCE(tbl->last_flush, jiffies);
 283unlock:
 284	write_unlock_bh(&tbl->lock);
 285
 286	return shrunk;
 287}
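/* Editor's illustration (not part of the kernel source): neigh_forced_gc()
 * above bounds its runtime to roughly one millisecond by consulting the
 * clock only once every 16 entries.  A minimal userspace sketch of that
 * pattern, assuming clock_gettime(CLOCK_MONOTONIC) as the time source.
 */
#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	unsigned long long tmax = now_ns() + 1000000ULL;	/* ~1 ms budget */
	long scanned = 0, i;
	int loop = 0;

	for (i = 0; i < 100000000L; i++) {
		scanned++;			/* stand-in for per-entry work */
		if (++loop == 16) {		/* amortize the clock read */
			if (now_ns() > tmax)
				break;
			loop = 0;
		}
	}
	printf("scanned %ld entries within the budget\n", scanned);
	return 0;
}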
 288
 289static void neigh_add_timer(struct neighbour *n, unsigned long when)
 290{
  291	/* Keep a safe distance from the jiffies - LONG_MAX wrap-around point
  292	 * while the timer is running in DELAY/PROBE state, but still show
  293	 * large times in the past to user space.
  294	 */
 295	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
 296
 297	neigh_hold(n);
 298	if (!time_in_range(n->confirmed, mint, jiffies))
 299		n->confirmed = mint;
 300	if (time_before(n->used, n->confirmed))
 301		n->used = n->confirmed;
 302	if (unlikely(mod_timer(&n->timer, when))) {
 303		printk("NEIGH: BUG, double timer add, state is %x\n",
 304		       n->nud_state);
 305		dump_stack();
 306	}
 307}
 308
 309static int neigh_del_timer(struct neighbour *n)
 310{
 311	if ((n->nud_state & NUD_IN_TIMER) &&
 312	    del_timer(&n->timer)) {
 313		neigh_release(n);
 314		return 1;
 315	}
 316	return 0;
 317}
 318
 319static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
 320						   int family)
 321{
 322	switch (family) {
 323	case AF_INET:
 324		return __in_dev_arp_parms_get_rcu(dev);
 325	case AF_INET6:
 326		return __in6_dev_nd_parms_get_rcu(dev);
 327	}
 328	return NULL;
 329}
 330
 331static void neigh_parms_qlen_dec(struct net_device *dev, int family)
 332{
 333	struct neigh_parms *p;
 334
 335	rcu_read_lock();
 336	p = neigh_get_dev_parms_rcu(dev, family);
 337	if (p)
 338		p->qlen--;
 339	rcu_read_unlock();
 340}
 341
 342static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
 343			       int family)
 344{
 345	struct sk_buff_head tmp;
 346	unsigned long flags;
 347	struct sk_buff *skb;
 348
 349	skb_queue_head_init(&tmp);
 350	spin_lock_irqsave(&list->lock, flags);
 351	skb = skb_peek(list);
 352	while (skb != NULL) {
 353		struct sk_buff *skb_next = skb_peek_next(skb, list);
 354		struct net_device *dev = skb->dev;
 355
 356		if (net == NULL || net_eq(dev_net(dev), net)) {
 357			neigh_parms_qlen_dec(dev, family);
 358			__skb_unlink(skb, list);
 359			__skb_queue_tail(&tmp, skb);
 360		}
 361		skb = skb_next;
 362	}
 363	spin_unlock_irqrestore(&list->lock, flags);
 364
 365	while ((skb = __skb_dequeue(&tmp))) {
 366		dev_put(skb->dev);
 367		kfree_skb(skb);
 368	}
 369}
 370
 371static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
 372			    bool skip_perm)
 373{
 374	struct hlist_head *dev_head;
 375	struct hlist_node *tmp;
 376	struct neighbour *n;
 377
 378	dev_head = neigh_get_dev_table(dev, tbl->family);
 379
 380	hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
 381		if (skip_perm && n->nud_state & NUD_PERMANENT)
 382			continue;
 383
 384		hlist_del_rcu(&n->hash);
 385		hlist_del_rcu(&n->dev_list);
 386		write_lock(&n->lock);
 387		neigh_del_timer(n);
 388		neigh_mark_dead(n);
 389		if (refcount_read(&n->refcnt) != 1) {
  390			/* The most unpleasant situation:
  391			 * we must destroy the neighbour entry,
  392			 * but someone still holds a reference to it.
  393			 *
  394			 * Destruction will be delayed until
  395			 * the last user releases it, but we
  396			 * must kill the timers etc. and move
  397			 * the entry to a safe state.
  398			 */
 399			__skb_queue_purge(&n->arp_queue);
 400			n->arp_queue_len_bytes = 0;
 401			WRITE_ONCE(n->output, neigh_blackhole);
 402			if (n->nud_state & NUD_VALID)
 403				n->nud_state = NUD_NOARP;
 404			else
 405				n->nud_state = NUD_NONE;
 406			neigh_dbg(2, "neigh %p is stray\n", n);
 407		}
 408		write_unlock(&n->lock);
 409		neigh_cleanup_and_release(n);
 410	}
 411}
 412
 413void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 414{
 415	write_lock_bh(&tbl->lock);
 416	neigh_flush_dev(tbl, dev, false);
 417	write_unlock_bh(&tbl->lock);
 418}
 419EXPORT_SYMBOL(neigh_changeaddr);
 420
 421static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 422			  bool skip_perm)
 423{
 424	write_lock_bh(&tbl->lock);
 425	neigh_flush_dev(tbl, dev, skip_perm);
 426	pneigh_ifdown_and_unlock(tbl, dev);
 427	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
 428			   tbl->family);
 429	if (skb_queue_empty_lockless(&tbl->proxy_queue))
 430		del_timer_sync(&tbl->proxy_timer);
 431	return 0;
 432}
 433
 434int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
 435{
 436	__neigh_ifdown(tbl, dev, true);
 437	return 0;
 438}
 439EXPORT_SYMBOL(neigh_carrier_down);
 440
 441int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 442{
 443	__neigh_ifdown(tbl, dev, false);
 444	return 0;
 445}
 446EXPORT_SYMBOL(neigh_ifdown);
 447
 448static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 449				     struct net_device *dev,
 450				     u32 flags, bool exempt_from_gc)
 451{
 452	struct neighbour *n = NULL;
 453	unsigned long now = jiffies;
 454	int entries, gc_thresh3;
 455
 456	if (exempt_from_gc)
 457		goto do_alloc;
 458
 459	entries = atomic_inc_return(&tbl->gc_entries) - 1;
 460	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
 461	if (entries >= gc_thresh3 ||
 462	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
 463	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
 464		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
 465			net_info_ratelimited("%s: neighbor table overflow!\n",
 466					     tbl->id);
 467			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 468			goto out_entries;
 469		}
 470	}
 471
 472do_alloc:
 473	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
 474	if (!n)
 475		goto out_entries;
 476
 477	__skb_queue_head_init(&n->arp_queue);
 478	rwlock_init(&n->lock);
 479	seqlock_init(&n->ha_lock);
 480	n->updated	  = n->used = now;
 481	n->nud_state	  = NUD_NONE;
 482	n->output	  = neigh_blackhole;
 483	n->flags	  = flags;
 484	seqlock_init(&n->hh.hh_lock);
 485	n->parms	  = neigh_parms_clone(&tbl->parms);
 486	timer_setup(&n->timer, neigh_timer_handler, 0);
 487
 488	NEIGH_CACHE_STAT_INC(tbl, allocs);
 489	n->tbl		  = tbl;
 490	refcount_set(&n->refcnt, 1);
 491	n->dead		  = 1;
 492	INIT_LIST_HEAD(&n->gc_list);
 493	INIT_LIST_HEAD(&n->managed_list);
 494
 495	atomic_inc(&tbl->entries);
 496out:
 497	return n;
 498
 499out_entries:
 500	if (!exempt_from_gc)
 501		atomic_dec(&tbl->gc_entries);
 502	goto out;
 503}
 504
 505static void neigh_get_hash_rnd(u32 *x)
 506{
 507	*x = get_random_u32() | 1;
 508}
 509
 510static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 511{
 512	size_t size = (1 << shift) * sizeof(struct hlist_head);
 513	struct hlist_head *hash_heads;
 514	struct neigh_hash_table *ret;
 515	int i;
 516
 517	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 518	if (!ret)
 519		return NULL;
 520
 521	hash_heads = kvzalloc(size, GFP_ATOMIC);
 522	if (!hash_heads) {
 523		kfree(ret);
 524		return NULL;
 525	}
 526	ret->hash_heads = hash_heads;
 527	ret->hash_shift = shift;
 528	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
 529		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 530	return ret;
 531}
 532
 533static void neigh_hash_free_rcu(struct rcu_head *head)
 534{
 535	struct neigh_hash_table *nht = container_of(head,
 536						    struct neigh_hash_table,
 537						    rcu);
 538
 539	kvfree(nht->hash_heads);
 540	kfree(nht);
 541}
 542
 543static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 544						unsigned long new_shift)
 545{
 546	unsigned int i, hash;
 547	struct neigh_hash_table *new_nht, *old_nht;
 548
 549	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 550
 551	old_nht = rcu_dereference_protected(tbl->nht,
 552					    lockdep_is_held(&tbl->lock));
 553	new_nht = neigh_hash_alloc(new_shift);
 554	if (!new_nht)
 555		return old_nht;
 556
 557	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
 558		struct hlist_node *tmp;
 559		struct neighbour *n;
 560
 561		neigh_for_each_in_bucket_safe(n, tmp, &old_nht->hash_heads[i]) {
 562			hash = tbl->hash(n->primary_key, n->dev,
 563					 new_nht->hash_rnd);
 564
 565			hash >>= (32 - new_nht->hash_shift);
 566
 567			hlist_del_rcu(&n->hash);
 568			hlist_add_head_rcu(&n->hash, &new_nht->hash_heads[hash]);
 569		}
 570	}
 571
 572	rcu_assign_pointer(tbl->nht, new_nht);
 573	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
 574	return new_nht;
 575}
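/* Editor's illustration (not part of the kernel source): when rehashing,
 * the bucket index is simply the top hash_shift bits of the 32-bit hash,
 * as in the "hash >>= (32 - new_nht->hash_shift)" step above.  A small
 * standalone sketch of that extraction (shift assumed to be 1..31).
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int bucket_of(uint32_t hash, unsigned int shift)
{
	return hash >> (32 - shift);	/* keep only the top 'shift' bits */
}

int main(void)
{
	uint32_t hash = 0xdeadbeef;

	/* With shift = 3 there are 8 buckets; the top bits 110b select bucket 6. */
	printf("bucket %u of %u\n", bucket_of(hash, 3), 1U << 3);
	return 0;
}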
 576
 577struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 578			       struct net_device *dev)
 579{
 580	struct neighbour *n;
 581
 582	NEIGH_CACHE_STAT_INC(tbl, lookups);
 583
 584	rcu_read_lock();
 585	n = __neigh_lookup_noref(tbl, pkey, dev);
 586	if (n) {
 587		if (!refcount_inc_not_zero(&n->refcnt))
 588			n = NULL;
 589		NEIGH_CACHE_STAT_INC(tbl, hits);
 590	}
 591
 592	rcu_read_unlock();
 593	return n;
 594}
 595EXPORT_SYMBOL(neigh_lookup);
 596
 597static struct neighbour *
 598___neigh_create(struct neigh_table *tbl, const void *pkey,
 599		struct net_device *dev, u32 flags,
 600		bool exempt_from_gc, bool want_ref)
 601{
 602	u32 hash_val, key_len = tbl->key_len;
 603	struct neighbour *n1, *rc, *n;
 604	struct neigh_hash_table *nht;
 605	int error;
 606
 607	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
 608	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
 609	if (!n) {
 610		rc = ERR_PTR(-ENOBUFS);
 611		goto out;
 612	}
 613
 614	memcpy(n->primary_key, pkey, key_len);
 615	n->dev = dev;
 616	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
 617
 618	/* Protocol specific setup. */
 619	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
 620		rc = ERR_PTR(error);
 621		goto out_neigh_release;
 622	}
 623
 624	if (dev->netdev_ops->ndo_neigh_construct) {
 625		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 626		if (error < 0) {
 627			rc = ERR_PTR(error);
 628			goto out_neigh_release;
 629		}
 630	}
 631
 632	/* Device specific setup. */
 633	if (n->parms->neigh_setup &&
 634	    (error = n->parms->neigh_setup(n)) < 0) {
 635		rc = ERR_PTR(error);
 636		goto out_neigh_release;
 637	}
 638
 639	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 640
 641	write_lock_bh(&tbl->lock);
 642	nht = rcu_dereference_protected(tbl->nht,
 643					lockdep_is_held(&tbl->lock));
 644
 645	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 646		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 647
 648	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 649
 650	if (n->parms->dead) {
 651		rc = ERR_PTR(-EINVAL);
 652		goto out_tbl_unlock;
 653	}
 654
 655	neigh_for_each_in_bucket(n1, &nht->hash_heads[hash_val]) {
 656		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 657			if (want_ref)
 658				neigh_hold(n1);
 659			rc = n1;
 660			goto out_tbl_unlock;
 661		}
 662	}
 663
 664	n->dead = 0;
 665	if (!exempt_from_gc)
 666		list_add_tail(&n->gc_list, &n->tbl->gc_list);
 667	if (n->flags & NTF_MANAGED)
 668		list_add_tail(&n->managed_list, &n->tbl->managed_list);
 669	if (want_ref)
 670		neigh_hold(n);
 671	hlist_add_head_rcu(&n->hash, &nht->hash_heads[hash_val]);
 672
 673	hlist_add_head_rcu(&n->dev_list,
 674			   neigh_get_dev_table(dev, tbl->family));
 675
 676	write_unlock_bh(&tbl->lock);
 677	neigh_dbg(2, "neigh %p is created\n", n);
 678	rc = n;
 679out:
 680	return rc;
 681out_tbl_unlock:
 682	write_unlock_bh(&tbl->lock);
 683out_neigh_release:
 684	if (!exempt_from_gc)
 685		atomic_dec(&tbl->gc_entries);
 686	neigh_release(n);
 687	goto out;
 688}
 689
 690struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 691				 struct net_device *dev, bool want_ref)
 692{
 693	bool exempt_from_gc = !!(dev->flags & IFF_LOOPBACK);
 694
 695	return ___neigh_create(tbl, pkey, dev, 0, exempt_from_gc, want_ref);
 696}
 697EXPORT_SYMBOL(__neigh_create);
 698
 699static u32 pneigh_hash(const void *pkey, unsigned int key_len)
 700{
 701	u32 hash_val = *(u32 *)(pkey + key_len - 4);
 702	hash_val ^= (hash_val >> 16);
 703	hash_val ^= hash_val >> 8;
 704	hash_val ^= hash_val >> 4;
 705	hash_val &= PNEIGH_HASHMASK;
 706	return hash_val;
 707}
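/* Editor's illustration (not part of the kernel source): pneigh_hash()
 * above folds the last 32 bits of the key down to one of 16 buckets.
 * A standalone sketch running the same fold on a sample IPv4 key laid
 * out in wire order (memcpy is used instead of the kernel's direct,
 * possibly unaligned load).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PNEIGH_HASHMASK_SKETCH	0xF

static uint32_t pneigh_hash_sketch(const void *pkey, unsigned int key_len)
{
	uint32_t hash_val;

	memcpy(&hash_val, (const unsigned char *)pkey + key_len - 4, 4);
	hash_val ^= hash_val >> 16;
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	return hash_val & PNEIGH_HASHMASK_SKETCH;
}

int main(void)
{
	unsigned char key[4] = { 192, 0, 2, 1 };	/* 192.0.2.1 */

	printf("bucket = %u\n", pneigh_hash_sketch(key, sizeof(key)));
	return 0;
}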
 708
 709static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
 710					      struct net *net,
 711					      const void *pkey,
 712					      unsigned int key_len,
 713					      struct net_device *dev)
 714{
 715	while (n) {
 716		if (!memcmp(n->key, pkey, key_len) &&
 717		    net_eq(pneigh_net(n), net) &&
 718		    (n->dev == dev || !n->dev))
 719			return n;
 720		n = n->next;
 721	}
 722	return NULL;
 723}
 724
 725struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
 726		struct net *net, const void *pkey, struct net_device *dev)
 727{
 728	unsigned int key_len = tbl->key_len;
 729	u32 hash_val = pneigh_hash(pkey, key_len);
 730
 731	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 732				 net, pkey, key_len, dev);
 733}
 734EXPORT_SYMBOL_GPL(__pneigh_lookup);
 735
 736struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 737				    struct net *net, const void *pkey,
 738				    struct net_device *dev, int creat)
 739{
 740	struct pneigh_entry *n;
 741	unsigned int key_len = tbl->key_len;
 742	u32 hash_val = pneigh_hash(pkey, key_len);
 743
 744	read_lock_bh(&tbl->lock);
 745	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
 746			      net, pkey, key_len, dev);
 747	read_unlock_bh(&tbl->lock);
 748
 749	if (n || !creat)
 750		goto out;
 751
 752	ASSERT_RTNL();
 753
 754	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
 755	if (!n)
 756		goto out;
 757
 758	write_pnet(&n->net, net);
 759	memcpy(n->key, pkey, key_len);
 760	n->dev = dev;
 761	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
 762
 763	if (tbl->pconstructor && tbl->pconstructor(n)) {
 764		netdev_put(dev, &n->dev_tracker);
 765		kfree(n);
 766		n = NULL;
 767		goto out;
 768	}
 769
 770	write_lock_bh(&tbl->lock);
 771	n->next = tbl->phash_buckets[hash_val];
 772	tbl->phash_buckets[hash_val] = n;
 773	write_unlock_bh(&tbl->lock);
 774out:
 775	return n;
 776}
 777EXPORT_SYMBOL(pneigh_lookup);
 778
 779
 780int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 781		  struct net_device *dev)
 782{
 783	struct pneigh_entry *n, **np;
 784	unsigned int key_len = tbl->key_len;
 785	u32 hash_val = pneigh_hash(pkey, key_len);
 786
 787	write_lock_bh(&tbl->lock);
 788	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 789	     np = &n->next) {
 790		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
 791		    net_eq(pneigh_net(n), net)) {
 792			*np = n->next;
 793			write_unlock_bh(&tbl->lock);
 794			if (tbl->pdestructor)
 795				tbl->pdestructor(n);
 796			netdev_put(n->dev, &n->dev_tracker);
 797			kfree(n);
 798			return 0;
 799		}
 800	}
 801	write_unlock_bh(&tbl->lock);
 802	return -ENOENT;
 803}
 804
 805static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
 806				    struct net_device *dev)
 807{
 808	struct pneigh_entry *n, **np, *freelist = NULL;
 809	u32 h;
 810
 811	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 812		np = &tbl->phash_buckets[h];
 813		while ((n = *np) != NULL) {
 814			if (!dev || n->dev == dev) {
 815				*np = n->next;
 816				n->next = freelist;
 817				freelist = n;
 818				continue;
 819			}
 820			np = &n->next;
 821		}
 822	}
 823	write_unlock_bh(&tbl->lock);
 824	while ((n = freelist)) {
 825		freelist = n->next;
 826		n->next = NULL;
 827		if (tbl->pdestructor)
 828			tbl->pdestructor(n);
 829		netdev_put(n->dev, &n->dev_tracker);
 830		kfree(n);
 831	}
 832	return -ENOENT;
 833}
 834
 835static void neigh_parms_destroy(struct neigh_parms *parms);
 836
 837static inline void neigh_parms_put(struct neigh_parms *parms)
 838{
 839	if (refcount_dec_and_test(&parms->refcnt))
 840		neigh_parms_destroy(parms);
 841}
 842
  843/*
  844 *	The neighbour must already have been removed
  845 *	from the table.
  846 */
 847void neigh_destroy(struct neighbour *neigh)
 848{
 849	struct net_device *dev = neigh->dev;
 850
 851	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 852
 853	if (!neigh->dead) {
 854		pr_warn("Destroying alive neighbour %p\n", neigh);
 855		dump_stack();
 856		return;
 857	}
 858
 859	if (neigh_del_timer(neigh))
 860		pr_warn("Impossible event\n");
 861
 862	write_lock_bh(&neigh->lock);
 863	__skb_queue_purge(&neigh->arp_queue);
 864	write_unlock_bh(&neigh->lock);
 865	neigh->arp_queue_len_bytes = 0;
 866
 867	if (dev->netdev_ops->ndo_neigh_destroy)
 868		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 869
 870	netdev_put(dev, &neigh->dev_tracker);
 871	neigh_parms_put(neigh->parms);
 872
 873	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
 874
 875	atomic_dec(&neigh->tbl->entries);
 876	kfree_rcu(neigh, rcu);
 877}
 878EXPORT_SYMBOL(neigh_destroy);
 879
 880/* Neighbour state is suspicious;
 881   disable fast path.
 882
 883   Called with write_locked neigh.
 884 */
 885static void neigh_suspect(struct neighbour *neigh)
 886{
 887	neigh_dbg(2, "neigh %p is suspected\n", neigh);
 888
 889	WRITE_ONCE(neigh->output, neigh->ops->output);
 890}
 891
 892/* Neighbour state is OK;
 893   enable fast path.
 894
 895   Called with write_locked neigh.
 896 */
 897static void neigh_connect(struct neighbour *neigh)
 898{
 899	neigh_dbg(2, "neigh %p is connected\n", neigh);
 900
 901	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
 902}
 903
 904static void neigh_periodic_work(struct work_struct *work)
 905{
 906	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 907	struct neigh_hash_table *nht;
 908	struct hlist_node *tmp;
 909	struct neighbour *n;
 910	unsigned int i;
 911
 912	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 913
 914	write_lock_bh(&tbl->lock);
 915	nht = rcu_dereference_protected(tbl->nht,
 916					lockdep_is_held(&tbl->lock));
 917
 918	/*
 919	 *	periodically recompute ReachableTime from random function
 920	 */
 921
 922	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 923		struct neigh_parms *p;
 924
 925		WRITE_ONCE(tbl->last_rand, jiffies);
 926		list_for_each_entry(p, &tbl->parms_list, list)
 927			p->reachable_time =
 928				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 929	}
 930
 931	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
 932		goto out;
 933
 934	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 935		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i]) {
 936			unsigned int state;
 937
 938			write_lock(&n->lock);
 939
 940			state = n->nud_state;
 941			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
 942			    (n->flags & NTF_EXT_LEARNED)) {
 943				write_unlock(&n->lock);
 944				continue;
 945			}
 946
 947			if (time_before(n->used, n->confirmed) &&
 948			    time_is_before_eq_jiffies(n->confirmed))
 949				n->used = n->confirmed;
 950
 951			if (refcount_read(&n->refcnt) == 1 &&
 952			    (state == NUD_FAILED ||
 953			     !time_in_range_open(jiffies, n->used,
 954						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 955				hlist_del_rcu(&n->hash);
 956				hlist_del_rcu(&n->dev_list);
 957				neigh_mark_dead(n);
 958				write_unlock(&n->lock);
 959				neigh_cleanup_and_release(n);
 960				continue;
 961			}
 962			write_unlock(&n->lock);
 963		}
 964		/*
 965		 * It's fine to release lock here, even if hash table
 966		 * grows while we are preempted.
 967		 */
 968		write_unlock_bh(&tbl->lock);
 969		cond_resched();
 970		write_lock_bh(&tbl->lock);
 971		nht = rcu_dereference_protected(tbl->nht,
 972						lockdep_is_held(&tbl->lock));
 973	}
 974out:
 975	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
 976	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
 977	 * BASE_REACHABLE_TIME.
 978	 */
 979	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
 980			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
 981	write_unlock_bh(&tbl->lock);
 982}
 983
 984static __inline__ int neigh_max_probes(struct neighbour *n)
 985{
 986	struct neigh_parms *p = n->parms;
 987	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
 988	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
 989	        NEIGH_VAR(p, MCAST_PROBES));
 990}
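/* Editor's illustration (not part of the kernel source): with the usual ARP
 * defaults (ucast_probes = 3, app_probes = 0, mcast_probes = 3,
 * mcast_resolicit = 0 -- assumed values), neigh_max_probes() works out as
 * below: 6 probes before the entry is declared NUD_FAILED.
 */
#include <stdio.h>

int main(void)
{
	int ucast_probes = 3, app_probes = 0;	/* assumed defaults */
	int mcast_probes = 3, mcast_reprobes = 0;
	int in_probe_state = 0;			/* NUD_PROBE would use MCAST_REPROBES */
	int max_probes;

	max_probes = ucast_probes + app_probes +
		     (in_probe_state ? mcast_reprobes : mcast_probes);
	printf("max probes: %d\n", max_probes);	/* prints 6 */
	return 0;
}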
 991
 992static void neigh_invalidate(struct neighbour *neigh)
 993	__releases(neigh->lock)
 994	__acquires(neigh->lock)
 995{
 996	struct sk_buff *skb;
 997
 998	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
 999	neigh_dbg(2, "neigh %p is failed\n", neigh);
1000	neigh->updated = jiffies;
1001
 1002	/* This is a very delicate spot: the error-report routine is quite
 1003	   complicated and can, in particular, hit this very same neighbour entry!
 1004
 1005	   So we try to be careful here and avoid an endless loop. --ANK
 1006	 */
1007	while (neigh->nud_state == NUD_FAILED &&
1008	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1009		write_unlock(&neigh->lock);
1010		neigh->ops->error_report(neigh, skb);
1011		write_lock(&neigh->lock);
1012	}
1013	__skb_queue_purge(&neigh->arp_queue);
1014	neigh->arp_queue_len_bytes = 0;
1015}
1016
1017static void neigh_probe(struct neighbour *neigh)
1018	__releases(neigh->lock)
1019{
1020	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1021	/* keep skb alive even if arp_queue overflows */
1022	if (skb)
1023		skb = skb_clone(skb, GFP_ATOMIC);
1024	write_unlock(&neigh->lock);
1025	if (neigh->ops->solicit)
1026		neigh->ops->solicit(neigh, skb);
1027	atomic_inc(&neigh->probes);
1028	consume_skb(skb);
1029}
1030
1031/* Called when a timer expires for a neighbour entry. */
1032
1033static void neigh_timer_handler(struct timer_list *t)
1034{
1035	unsigned long now, next;
1036	struct neighbour *neigh = from_timer(neigh, t, timer);
1037	unsigned int state;
1038	int notify = 0;
1039
1040	write_lock(&neigh->lock);
1041
1042	state = neigh->nud_state;
1043	now = jiffies;
1044	next = now + HZ;
1045
1046	if (!(state & NUD_IN_TIMER))
1047		goto out;
1048
1049	if (state & NUD_REACHABLE) {
1050		if (time_before_eq(now,
1051				   neigh->confirmed + neigh->parms->reachable_time)) {
1052			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1053			next = neigh->confirmed + neigh->parms->reachable_time;
1054		} else if (time_before_eq(now,
1055					  neigh->used +
1056					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1057			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1058			WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1059			neigh->updated = jiffies;
1060			neigh_suspect(neigh);
1061			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1062		} else {
1063			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1064			WRITE_ONCE(neigh->nud_state, NUD_STALE);
1065			neigh->updated = jiffies;
1066			neigh_suspect(neigh);
1067			notify = 1;
1068		}
1069	} else if (state & NUD_DELAY) {
1070		if (time_before_eq(now,
1071				   neigh->confirmed +
1072				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1073			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1074			WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1075			neigh->updated = jiffies;
1076			neigh_connect(neigh);
1077			notify = 1;
1078			next = neigh->confirmed + neigh->parms->reachable_time;
1079		} else {
1080			neigh_dbg(2, "neigh %p is probed\n", neigh);
1081			WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1082			neigh->updated = jiffies;
1083			atomic_set(&neigh->probes, 0);
1084			notify = 1;
1085			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1086					 HZ/100);
1087		}
1088	} else {
1089		/* NUD_PROBE|NUD_INCOMPLETE */
1090		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1091	}
1092
1093	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1094	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1095		WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1096		notify = 1;
1097		neigh_invalidate(neigh);
1098		goto out;
1099	}
1100
1101	if (neigh->nud_state & NUD_IN_TIMER) {
1102		if (time_before(next, jiffies + HZ/100))
1103			next = jiffies + HZ/100;
1104		if (!mod_timer(&neigh->timer, next))
1105			neigh_hold(neigh);
1106	}
1107	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1108		neigh_probe(neigh);
1109	} else {
1110out:
1111		write_unlock(&neigh->lock);
1112	}
1113
1114	if (notify)
1115		neigh_update_notify(neigh, 0);
1116
1117	trace_neigh_timer_handler(neigh, 0);
1118
1119	neigh_release(neigh);
1120}
1121
1122int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1123		       const bool immediate_ok)
1124{
1125	int rc;
1126	bool immediate_probe = false;
1127
1128	write_lock_bh(&neigh->lock);
1129
1130	rc = 0;
1131	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1132		goto out_unlock_bh;
1133	if (neigh->dead)
1134		goto out_dead;
1135
1136	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1137		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1138		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1139			unsigned long next, now = jiffies;
1140
1141			atomic_set(&neigh->probes,
1142				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1143			neigh_del_timer(neigh);
1144			WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1145			neigh->updated = now;
1146			if (!immediate_ok) {
1147				next = now + 1;
1148			} else {
1149				immediate_probe = true;
1150				next = now + max(NEIGH_VAR(neigh->parms,
1151							   RETRANS_TIME),
1152						 HZ / 100);
1153			}
1154			neigh_add_timer(neigh, next);
1155		} else {
1156			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1157			neigh->updated = jiffies;
1158			write_unlock_bh(&neigh->lock);
1159
1160			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1161			return 1;
1162		}
1163	} else if (neigh->nud_state & NUD_STALE) {
1164		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1165		neigh_del_timer(neigh);
1166		WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1167		neigh->updated = jiffies;
1168		neigh_add_timer(neigh, jiffies +
1169				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1170	}
1171
1172	if (neigh->nud_state == NUD_INCOMPLETE) {
1173		if (skb) {
1174			while (neigh->arp_queue_len_bytes + skb->truesize >
1175			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1176				struct sk_buff *buff;
1177
1178				buff = __skb_dequeue(&neigh->arp_queue);
1179				if (!buff)
1180					break;
1181				neigh->arp_queue_len_bytes -= buff->truesize;
1182				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1183				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1184			}
1185			skb_dst_force(skb);
1186			__skb_queue_tail(&neigh->arp_queue, skb);
1187			neigh->arp_queue_len_bytes += skb->truesize;
1188		}
1189		rc = 1;
1190	}
1191out_unlock_bh:
1192	if (immediate_probe)
1193		neigh_probe(neigh);
1194	else
1195		write_unlock(&neigh->lock);
1196	local_bh_enable();
1197	trace_neigh_event_send_done(neigh, rc);
1198	return rc;
1199
1200out_dead:
1201	if (neigh->nud_state & NUD_STALE)
1202		goto out_unlock_bh;
1203	write_unlock_bh(&neigh->lock);
1204	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1205	trace_neigh_event_send_dead(neigh, 1);
1206	return 1;
1207}
1208EXPORT_SYMBOL(__neigh_event_send);
1209
1210static void neigh_update_hhs(struct neighbour *neigh)
1211{
1212	struct hh_cache *hh;
1213	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1214		= NULL;
1215
1216	if (neigh->dev->header_ops)
1217		update = neigh->dev->header_ops->cache_update;
1218
1219	if (update) {
1220		hh = &neigh->hh;
1221		if (READ_ONCE(hh->hh_len)) {
1222			write_seqlock_bh(&hh->hh_lock);
1223			update(hh, neigh->dev, neigh->ha);
1224			write_sequnlock_bh(&hh->hh_lock);
1225		}
1226	}
1227}
1228
 1229/* Generic update routine.
 1230   -- lladdr is the new lladdr, or NULL if none is supplied.
 1231   -- new    is the new state.
 1232   -- flags
 1233	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
 1234				if it is different.
 1235	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect the existing "connected"
 1236				lladdr instead of overriding it
 1237				if it is different.
 1238	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 1239	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
 1240	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
 1241	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
 1242				NTF_ROUTER flag.
 1243	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to be
 1244				a router.
 1245
 1246   The caller MUST hold a reference count on the entry.
 1247 */
1248static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1249			  u8 new, u32 flags, u32 nlmsg_pid,
1250			  struct netlink_ext_ack *extack)
1251{
1252	bool gc_update = false, managed_update = false;
1253	int update_isrouter = 0;
1254	struct net_device *dev;
1255	int err, notify = 0;
1256	u8 old;
1257
1258	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1259
1260	write_lock_bh(&neigh->lock);
1261
1262	dev    = neigh->dev;
1263	old    = neigh->nud_state;
1264	err    = -EPERM;
1265
1266	if (neigh->dead) {
1267		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1268		new = old;
1269		goto out;
1270	}
1271	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1272	    (old & (NUD_NOARP | NUD_PERMANENT)))
1273		goto out;
1274
1275	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1276	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1277		new = old & ~NUD_PERMANENT;
1278		WRITE_ONCE(neigh->nud_state, new);
1279		err = 0;
1280		goto out;
1281	}
1282
1283	if (!(new & NUD_VALID)) {
1284		neigh_del_timer(neigh);
1285		if (old & NUD_CONNECTED)
1286			neigh_suspect(neigh);
1287		WRITE_ONCE(neigh->nud_state, new);
1288		err = 0;
1289		notify = old & NUD_VALID;
1290		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1291		    (new & NUD_FAILED)) {
1292			neigh_invalidate(neigh);
1293			notify = 1;
1294		}
1295		goto out;
1296	}
1297
1298	/* Compare new lladdr with cached one */
1299	if (!dev->addr_len) {
1300		/* First case: device needs no address. */
1301		lladdr = neigh->ha;
1302	} else if (lladdr) {
1303		/* The second case: if something is already cached
1304		   and a new address is proposed:
1305		   - compare new & old
1306		   - if they are different, check override flag
1307		 */
1308		if ((old & NUD_VALID) &&
1309		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1310			lladdr = neigh->ha;
1311	} else {
1312		/* No address is supplied; if we know something,
1313		   use it, otherwise discard the request.
1314		 */
1315		err = -EINVAL;
1316		if (!(old & NUD_VALID)) {
1317			NL_SET_ERR_MSG(extack, "No link layer address given");
1318			goto out;
1319		}
1320		lladdr = neigh->ha;
1321	}
1322
 1323	/* Update the confirmed timestamp for the neighbour entry after we
 1324	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
 1325	 */
1326	if (new & NUD_CONNECTED)
1327		neigh->confirmed = jiffies;
1328
 1329	/* If the entry was valid and the address has not changed,
 1330	   do not change the entry state if the new one is STALE.
 1331	 */
1332	err = 0;
1333	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1334	if (old & NUD_VALID) {
1335		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1336			update_isrouter = 0;
1337			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1338			    (old & NUD_CONNECTED)) {
1339				lladdr = neigh->ha;
1340				new = NUD_STALE;
1341			} else
1342				goto out;
1343		} else {
1344			if (lladdr == neigh->ha && new == NUD_STALE &&
1345			    !(flags & NEIGH_UPDATE_F_ADMIN))
1346				new = old;
1347		}
1348	}
1349
 1350	/* Update the timestamp only once we know we will make a change to the
 1351	 * neighbour entry. Otherwise we risk moving the locktime window with
 1352	 * no-op updates and ignoring relevant ARP updates.
 1353	 */
1354	if (new != old || lladdr != neigh->ha)
1355		neigh->updated = jiffies;
1356
1357	if (new != old) {
1358		neigh_del_timer(neigh);
1359		if (new & NUD_PROBE)
1360			atomic_set(&neigh->probes, 0);
1361		if (new & NUD_IN_TIMER)
1362			neigh_add_timer(neigh, (jiffies +
1363						((new & NUD_REACHABLE) ?
1364						 neigh->parms->reachable_time :
1365						 0)));
1366		WRITE_ONCE(neigh->nud_state, new);
1367		notify = 1;
1368	}
1369
1370	if (lladdr != neigh->ha) {
1371		write_seqlock(&neigh->ha_lock);
1372		memcpy(&neigh->ha, lladdr, dev->addr_len);
1373		write_sequnlock(&neigh->ha_lock);
1374		neigh_update_hhs(neigh);
1375		if (!(new & NUD_CONNECTED))
1376			neigh->confirmed = jiffies -
1377				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1378		notify = 1;
1379	}
1380	if (new == old)
1381		goto out;
1382	if (new & NUD_CONNECTED)
1383		neigh_connect(neigh);
1384	else
1385		neigh_suspect(neigh);
1386	if (!(old & NUD_VALID)) {
1387		struct sk_buff *skb;
1388
1389		/* Again: avoid dead loop if something went wrong */
1390
1391		while (neigh->nud_state & NUD_VALID &&
1392		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1393			struct dst_entry *dst = skb_dst(skb);
1394			struct neighbour *n2, *n1 = neigh;
1395			write_unlock_bh(&neigh->lock);
1396
1397			rcu_read_lock();
1398
1399			/* Why not just use 'neigh' as-is?  The problem is that
1400			 * things such as shaper, eql, and sch_teql can end up
1401			 * using alternative, different, neigh objects to output
1402			 * the packet in the output path.  So what we need to do
1403			 * here is re-lookup the top-level neigh in the path so
1404			 * we can reinject the packet there.
1405			 */
1406			n2 = NULL;
1407			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1408				n2 = dst_neigh_lookup_skb(dst, skb);
1409				if (n2)
1410					n1 = n2;
1411			}
1412			READ_ONCE(n1->output)(n1, skb);
1413			if (n2)
1414				neigh_release(n2);
1415			rcu_read_unlock();
1416
1417			write_lock_bh(&neigh->lock);
1418		}
1419		__skb_queue_purge(&neigh->arp_queue);
1420		neigh->arp_queue_len_bytes = 0;
1421	}
1422out:
1423	if (update_isrouter)
1424		neigh_update_is_router(neigh, flags, &notify);
1425	write_unlock_bh(&neigh->lock);
1426	if (((new ^ old) & NUD_PERMANENT) || gc_update)
1427		neigh_update_gc_list(neigh);
1428	if (managed_update)
1429		neigh_update_managed_list(neigh);
1430	if (notify)
1431		neigh_update_notify(neigh, nlmsg_pid);
1432	trace_neigh_update_done(neigh, err);
1433	return err;
1434}
1435
1436int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1437		 u32 flags, u32 nlmsg_pid)
1438{
1439	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1440}
1441EXPORT_SYMBOL(neigh_update);
1442
1443/* Update the neigh to listen temporarily for probe responses, even if it is
1444 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1445 */
1446void __neigh_set_probe_once(struct neighbour *neigh)
1447{
1448	if (neigh->dead)
1449		return;
1450	neigh->updated = jiffies;
1451	if (!(neigh->nud_state & NUD_FAILED))
1452		return;
1453	WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1454	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1455	neigh_add_timer(neigh,
1456			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1457				      HZ/100));
1458}
1459EXPORT_SYMBOL(__neigh_set_probe_once);
1460
1461struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1462				 u8 *lladdr, void *saddr,
1463				 struct net_device *dev)
1464{
1465	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1466						 lladdr || !dev->addr_len);
1467	if (neigh)
1468		neigh_update(neigh, lladdr, NUD_STALE,
1469			     NEIGH_UPDATE_F_OVERRIDE, 0);
1470	return neigh;
1471}
1472EXPORT_SYMBOL(neigh_event_ns);
1473
1474/* called with read_lock_bh(&n->lock); */
1475static void neigh_hh_init(struct neighbour *n)
1476{
1477	struct net_device *dev = n->dev;
1478	__be16 prot = n->tbl->protocol;
1479	struct hh_cache	*hh = &n->hh;
1480
1481	write_lock_bh(&n->lock);
1482
1483	/* Only one thread can come in here and initialize the
1484	 * hh_cache entry.
1485	 */
1486	if (!hh->hh_len)
1487		dev->header_ops->cache(n, hh, prot);
1488
1489	write_unlock_bh(&n->lock);
1490}
1491
1492/* Slow and careful. */
1493
1494int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1495{
1496	int rc = 0;
1497
1498	if (!neigh_event_send(neigh, skb)) {
1499		int err;
1500		struct net_device *dev = neigh->dev;
1501		unsigned int seq;
1502
1503		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1504			neigh_hh_init(neigh);
1505
1506		do {
1507			__skb_pull(skb, skb_network_offset(skb));
1508			seq = read_seqbegin(&neigh->ha_lock);
1509			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1510					      neigh->ha, NULL, skb->len);
1511		} while (read_seqretry(&neigh->ha_lock, seq));
1512
1513		if (err >= 0)
1514			rc = dev_queue_xmit(skb);
1515		else
1516			goto out_kfree_skb;
1517	}
1518out:
1519	return rc;
1520out_kfree_skb:
1521	rc = -EINVAL;
1522	kfree_skb(skb);
1523	goto out;
1524}
1525EXPORT_SYMBOL(neigh_resolve_output);
1526
1527/* As fast as possible without hh cache */
1528
1529int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1530{
1531	struct net_device *dev = neigh->dev;
1532	unsigned int seq;
1533	int err;
1534
1535	do {
1536		__skb_pull(skb, skb_network_offset(skb));
1537		seq = read_seqbegin(&neigh->ha_lock);
1538		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1539				      neigh->ha, NULL, skb->len);
1540	} while (read_seqretry(&neigh->ha_lock, seq));
1541
1542	if (err >= 0)
1543		err = dev_queue_xmit(skb);
1544	else {
1545		err = -EINVAL;
1546		kfree_skb(skb);
1547	}
1548	return err;
1549}
1550EXPORT_SYMBOL(neigh_connected_output);
1551
1552int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1553{
1554	return dev_queue_xmit(skb);
1555}
1556EXPORT_SYMBOL(neigh_direct_output);
1557
1558static void neigh_managed_work(struct work_struct *work)
1559{
1560	struct neigh_table *tbl = container_of(work, struct neigh_table,
1561					       managed_work.work);
1562	struct neighbour *neigh;
1563
1564	write_lock_bh(&tbl->lock);
1565	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1566		neigh_event_send_probe(neigh, NULL, false);
1567	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1568			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1569	write_unlock_bh(&tbl->lock);
1570}
1571
1572static void neigh_proxy_process(struct timer_list *t)
1573{
1574	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1575	long sched_next = 0;
1576	unsigned long now = jiffies;
1577	struct sk_buff *skb, *n;
1578
1579	spin_lock(&tbl->proxy_queue.lock);
1580
1581	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1582		long tdif = NEIGH_CB(skb)->sched_next - now;
1583
1584		if (tdif <= 0) {
1585			struct net_device *dev = skb->dev;
1586
1587			neigh_parms_qlen_dec(dev, tbl->family);
1588			__skb_unlink(skb, &tbl->proxy_queue);
1589
1590			if (tbl->proxy_redo && netif_running(dev)) {
1591				rcu_read_lock();
1592				tbl->proxy_redo(skb);
1593				rcu_read_unlock();
1594			} else {
1595				kfree_skb(skb);
1596			}
1597
1598			dev_put(dev);
1599		} else if (!sched_next || tdif < sched_next)
1600			sched_next = tdif;
1601	}
1602	del_timer(&tbl->proxy_timer);
1603	if (sched_next)
1604		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1605	spin_unlock(&tbl->proxy_queue.lock);
1606}
1607
1608static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1609{
1610	/* If proxy_delay is zero, do not call get_random_u32_below()
1611	 * as it is undefined behavior.
1612	 */
1613	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1614
1615	return proxy_delay ?
1616	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
1617}
1618
1619void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1620		    struct sk_buff *skb)
1621{
1622	unsigned long sched_next = neigh_proxy_delay(p);
1623
1624	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1625		kfree_skb(skb);
1626		return;
1627	}
1628
1629	NEIGH_CB(skb)->sched_next = sched_next;
1630	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1631
1632	spin_lock(&tbl->proxy_queue.lock);
1633	if (del_timer(&tbl->proxy_timer)) {
1634		if (time_before(tbl->proxy_timer.expires, sched_next))
1635			sched_next = tbl->proxy_timer.expires;
1636	}
1637	skb_dst_drop(skb);
1638	dev_hold(skb->dev);
1639	__skb_queue_tail(&tbl->proxy_queue, skb);
1640	p->qlen++;
1641	mod_timer(&tbl->proxy_timer, sched_next);
1642	spin_unlock(&tbl->proxy_queue.lock);
1643}
1644EXPORT_SYMBOL(pneigh_enqueue);
1645
1646static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1647						      struct net *net, int ifindex)
1648{
1649	struct neigh_parms *p;
1650
1651	list_for_each_entry(p, &tbl->parms_list, list) {
1652		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1653		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1654			return p;
1655	}
1656
1657	return NULL;
1658}
1659
1660struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1661				      struct neigh_table *tbl)
1662{
1663	struct neigh_parms *p;
1664	struct net *net = dev_net(dev);
1665	const struct net_device_ops *ops = dev->netdev_ops;
1666
1667	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1668	if (p) {
1669		p->tbl		  = tbl;
1670		refcount_set(&p->refcnt, 1);
1671		p->reachable_time =
1672				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1673		p->qlen = 0;
1674		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1675		p->dev = dev;
1676		write_pnet(&p->net, net);
1677		p->sysctl_table = NULL;
1678
1679		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1680			netdev_put(dev, &p->dev_tracker);
1681			kfree(p);
1682			return NULL;
1683		}
1684
1685		write_lock_bh(&tbl->lock);
1686		list_add(&p->list, &tbl->parms.list);
1687		write_unlock_bh(&tbl->lock);
1688
1689		neigh_parms_data_state_cleanall(p);
1690	}
1691	return p;
1692}
1693EXPORT_SYMBOL(neigh_parms_alloc);
1694
1695static void neigh_rcu_free_parms(struct rcu_head *head)
1696{
1697	struct neigh_parms *parms =
1698		container_of(head, struct neigh_parms, rcu_head);
1699
1700	neigh_parms_put(parms);
1701}
1702
1703void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1704{
1705	if (!parms || parms == &tbl->parms)
1706		return;
1707	write_lock_bh(&tbl->lock);
1708	list_del(&parms->list);
1709	parms->dead = 1;
1710	write_unlock_bh(&tbl->lock);
1711	netdev_put(parms->dev, &parms->dev_tracker);
1712	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1713}
1714EXPORT_SYMBOL(neigh_parms_release);
1715
1716static void neigh_parms_destroy(struct neigh_parms *parms)
1717{
1718	kfree(parms);
1719}
1720
1721static struct lock_class_key neigh_table_proxy_queue_class;
1722
1723static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724
1725void neigh_table_init(int index, struct neigh_table *tbl)
1726{
1727	unsigned long now = jiffies;
1728	unsigned long phsize;
1729
1730	INIT_LIST_HEAD(&tbl->parms_list);
1731	INIT_LIST_HEAD(&tbl->gc_list);
1732	INIT_LIST_HEAD(&tbl->managed_list);
1733
1734	list_add(&tbl->parms.list, &tbl->parms_list);
1735	write_pnet(&tbl->parms.net, &init_net);
1736	refcount_set(&tbl->parms.refcnt, 1);
1737	tbl->parms.reachable_time =
1738			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1739	tbl->parms.qlen = 0;
1740
1741	tbl->stats = alloc_percpu(struct neigh_statistics);
1742	if (!tbl->stats)
1743		panic("cannot create neighbour cache statistics");
1744
1745#ifdef CONFIG_PROC_FS
1746	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1747			      &neigh_stat_seq_ops, tbl))
1748		panic("cannot create neighbour proc dir entry");
1749#endif
1750
1751	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1752
1753	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1754	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1755
1756	if (!tbl->nht || !tbl->phash_buckets)
1757		panic("cannot allocate neighbour cache hashes");
1758
1759	if (!tbl->entry_size)
1760		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1761					tbl->key_len, NEIGH_PRIV_ALIGN);
1762	else
1763		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1764
1765	rwlock_init(&tbl->lock);
1766
1767	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1768	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1769			tbl->parms.reachable_time);
1770	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1771	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1772
1773	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1774	skb_queue_head_init_class(&tbl->proxy_queue,
1775			&neigh_table_proxy_queue_class);
1776
1777	tbl->last_flush = now;
1778	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1779
1780	rcu_assign_pointer(neigh_tables[index], tbl);
1781}
1782EXPORT_SYMBOL(neigh_table_init);
1783
 1784/*
 1785 * Only called from ndisc_cleanup(), which means this is dead code
 1786 * because the IPv6 module can no longer be unloaded.
 1787 */
1788int neigh_table_clear(int index, struct neigh_table *tbl)
1789{
1790	RCU_INIT_POINTER(neigh_tables[index], NULL);
1791	synchronize_rcu();
1792
 1793	/* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1794	cancel_delayed_work_sync(&tbl->managed_work);
1795	cancel_delayed_work_sync(&tbl->gc_work);
1796	del_timer_sync(&tbl->proxy_timer);
1797	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1798	neigh_ifdown(tbl, NULL);
1799	if (atomic_read(&tbl->entries))
1800		pr_crit("neighbour leakage\n");
1801
1802	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1803		 neigh_hash_free_rcu);
1804	tbl->nht = NULL;
1805
1806	kfree(tbl->phash_buckets);
1807	tbl->phash_buckets = NULL;
1808
1809	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1810
1811	free_percpu(tbl->stats);
1812	tbl->stats = NULL;
1813
1814	return 0;
1815}
1816EXPORT_SYMBOL(neigh_table_clear);
1817
1818static struct neigh_table *neigh_find_table(int family)
1819{
1820	struct neigh_table *tbl = NULL;
1821
1822	switch (family) {
1823	case AF_INET:
1824		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ARP_TABLE]);
1825		break;
1826	case AF_INET6:
1827		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ND_TABLE]);
1828		break;
1829	}
1830
1831	return tbl;
1832}
1833
1834const struct nla_policy nda_policy[NDA_MAX+1] = {
1835	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1836	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1837	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1838	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1839	[NDA_PROBES]		= { .type = NLA_U32 },
1840	[NDA_VLAN]		= { .type = NLA_U16 },
1841	[NDA_PORT]		= { .type = NLA_U16 },
1842	[NDA_VNI]		= { .type = NLA_U32 },
1843	[NDA_IFINDEX]		= { .type = NLA_U32 },
1844	[NDA_MASTER]		= { .type = NLA_U32 },
1845	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1846	[NDA_NH_ID]		= { .type = NLA_U32 },
1847	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1848	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1849};
1850
1851static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1852			struct netlink_ext_ack *extack)
1853{
1854	struct net *net = sock_net(skb->sk);
1855	struct ndmsg *ndm;
1856	struct nlattr *dst_attr;
1857	struct neigh_table *tbl;
1858	struct neighbour *neigh;
1859	struct net_device *dev = NULL;
1860	int err = -EINVAL;
1861
1862	ASSERT_RTNL();
1863	if (nlmsg_len(nlh) < sizeof(*ndm))
1864		goto out;
1865
1866	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1867	if (!dst_attr) {
1868		NL_SET_ERR_MSG(extack, "Network address not specified");
1869		goto out;
1870	}
1871
1872	ndm = nlmsg_data(nlh);
1873	if (ndm->ndm_ifindex) {
1874		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1875		if (dev == NULL) {
1876			err = -ENODEV;
1877			goto out;
1878		}
1879	}
1880
1881	tbl = neigh_find_table(ndm->ndm_family);
1882	if (tbl == NULL)
1883		return -EAFNOSUPPORT;
1884
1885	if (nla_len(dst_attr) < (int)tbl->key_len) {
1886		NL_SET_ERR_MSG(extack, "Invalid network address");
1887		goto out;
1888	}
1889
1890	if (ndm->ndm_flags & NTF_PROXY) {
1891		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1892		goto out;
1893	}
1894
1895	if (dev == NULL)
1896		goto out;
1897
1898	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1899	if (neigh == NULL) {
1900		err = -ENOENT;
1901		goto out;
1902	}
1903
1904	err = __neigh_update(neigh, NULL, NUD_FAILED,
1905			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1906			     NETLINK_CB(skb).portid, extack);
1907	write_lock_bh(&tbl->lock);
1908	neigh_release(neigh);
1909	neigh_remove_one(neigh);
1910	write_unlock_bh(&tbl->lock);
1911
1912out:
1913	return err;
1914}
1915
1916static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1917		     struct netlink_ext_ack *extack)
1918{
1919	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1920		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1921	struct net *net = sock_net(skb->sk);
1922	struct ndmsg *ndm;
1923	struct nlattr *tb[NDA_MAX+1];
1924	struct neigh_table *tbl;
1925	struct net_device *dev = NULL;
1926	struct neighbour *neigh;
1927	void *dst, *lladdr;
1928	u8 protocol = 0;
1929	u32 ndm_flags;
1930	int err;
1931
1932	ASSERT_RTNL();
1933	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1934				     nda_policy, extack);
1935	if (err < 0)
1936		goto out;
1937
1938	err = -EINVAL;
1939	if (!tb[NDA_DST]) {
1940		NL_SET_ERR_MSG(extack, "Network address not specified");
1941		goto out;
1942	}
1943
1944	ndm = nlmsg_data(nlh);
1945	ndm_flags = ndm->ndm_flags;
1946	if (tb[NDA_FLAGS_EXT]) {
1947		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1948
1949		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1950			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1951			      hweight32(NTF_EXT_MASK)));
1952		ndm_flags |= (ext << NTF_EXT_SHIFT);
1953	}
1954	if (ndm->ndm_ifindex) {
1955		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1956		if (dev == NULL) {
1957			err = -ENODEV;
1958			goto out;
1959		}
1960
1961		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1962			NL_SET_ERR_MSG(extack, "Invalid link address");
1963			goto out;
1964		}
1965	}
1966
1967	tbl = neigh_find_table(ndm->ndm_family);
1968	if (tbl == NULL)
1969		return -EAFNOSUPPORT;
1970
1971	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1972		NL_SET_ERR_MSG(extack, "Invalid network address");
1973		goto out;
1974	}
1975
1976	dst = nla_data(tb[NDA_DST]);
1977	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1978
1979	if (tb[NDA_PROTOCOL])
1980		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1981	if (ndm_flags & NTF_PROXY) {
1982		struct pneigh_entry *pn;
1983
1984		if (ndm_flags & NTF_MANAGED) {
1985			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
1986			goto out;
1987		}
1988
1989		err = -ENOBUFS;
1990		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1991		if (pn) {
1992			pn->flags = ndm_flags;
1993			if (protocol)
1994				pn->protocol = protocol;
1995			err = 0;
1996		}
1997		goto out;
1998	}
1999
2000	if (!dev) {
2001		NL_SET_ERR_MSG(extack, "Device not specified");
2002		goto out;
2003	}
2004
2005	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2006		err = -EINVAL;
2007		goto out;
2008	}
2009
2010	neigh = neigh_lookup(tbl, dst, dev);
2011	if (neigh == NULL) {
2012		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
2013		bool exempt_from_gc = ndm_permanent ||
2014				      ndm_flags & NTF_EXT_LEARNED;
2015
2016		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2017			err = -ENOENT;
2018			goto out;
2019		}
2020		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2021			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2022			err = -EINVAL;
2023			goto out;
2024		}
2025
2026		neigh = ___neigh_create(tbl, dst, dev,
2027					ndm_flags &
2028					(NTF_EXT_LEARNED | NTF_MANAGED),
2029					exempt_from_gc, true);
2030		if (IS_ERR(neigh)) {
2031			err = PTR_ERR(neigh);
2032			goto out;
2033		}
2034	} else {
2035		if (nlh->nlmsg_flags & NLM_F_EXCL) {
2036			err = -EEXIST;
2037			neigh_release(neigh);
2038			goto out;
2039		}
2040
2041		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2042			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2043				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2044	}
2045
2046	if (protocol)
2047		neigh->protocol = protocol;
2048	if (ndm_flags & NTF_EXT_LEARNED)
2049		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2050	if (ndm_flags & NTF_ROUTER)
2051		flags |= NEIGH_UPDATE_F_ISROUTER;
2052	if (ndm_flags & NTF_MANAGED)
2053		flags |= NEIGH_UPDATE_F_MANAGED;
2054	if (ndm_flags & NTF_USE)
2055		flags |= NEIGH_UPDATE_F_USE;
2056
2057	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2058			     NETLINK_CB(skb).portid, extack);
2059	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2060		neigh_event_send(neigh, NULL);
2061		err = 0;
2062	}
2063	neigh_release(neigh);
2064out:
2065	return err;
2066}
2067
2068static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2069{
2070	struct nlattr *nest;
2071
2072	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2073	if (nest == NULL)
2074		return -ENOBUFS;
2075
2076	if ((parms->dev &&
2077	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2078	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2079	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2080			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2081	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2082	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2083			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2084	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2085	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2086	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2087			NEIGH_VAR(parms, UCAST_PROBES)) ||
2088	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2089			NEIGH_VAR(parms, MCAST_PROBES)) ||
2090	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2091			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2092	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2093			  NDTPA_PAD) ||
2094	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2095			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2096	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2097			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2098	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2099			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2100	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2101			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2102	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2103			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2104	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2105			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2106	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2107			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2108	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2109			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2110		goto nla_put_failure;
2111	return nla_nest_end(skb, nest);
2112
2113nla_put_failure:
2114	nla_nest_cancel(skb, nest);
2115	return -EMSGSIZE;
2116}
2117
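/* Build a full RTM_NEWNEIGHTBL message for one table: name, GC settings,
 * an NDTA_CONFIG snapshot, per-CPU statistics summed into NDTA_STATS, and
 * the table's default parms.  tbl->lock is held so the values are read
 * consistently.
 */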
2118static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2119			      u32 pid, u32 seq, int type, int flags)
2120{
2121	struct nlmsghdr *nlh;
2122	struct ndtmsg *ndtmsg;
2123
2124	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2125	if (nlh == NULL)
2126		return -EMSGSIZE;
2127
2128	ndtmsg = nlmsg_data(nlh);
2129
2130	read_lock_bh(&tbl->lock);
2131	ndtmsg->ndtm_family = tbl->family;
2132	ndtmsg->ndtm_pad1   = 0;
2133	ndtmsg->ndtm_pad2   = 0;
2134
2135	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2136	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2137			  NDTA_PAD) ||
2138	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2139	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2140	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2141		goto nla_put_failure;
2142	{
2143		unsigned long now = jiffies;
2144		long flush_delta = now - READ_ONCE(tbl->last_flush);
2145		long rand_delta = now - READ_ONCE(tbl->last_rand);
2146		struct neigh_hash_table *nht;
2147		struct ndt_config ndc = {
2148			.ndtc_key_len		= tbl->key_len,
2149			.ndtc_entry_size	= tbl->entry_size,
2150			.ndtc_entries		= atomic_read(&tbl->entries),
2151			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2152			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2153			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
2154		};
2155
2156		rcu_read_lock();
2157		nht = rcu_dereference(tbl->nht);
2158		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2159		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2160		rcu_read_unlock();
2161
2162		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2163			goto nla_put_failure;
2164	}
2165
2166	{
2167		int cpu;
2168		struct ndt_stats ndst;
2169
2170		memset(&ndst, 0, sizeof(ndst));
2171
2172		for_each_possible_cpu(cpu) {
2173			struct neigh_statistics	*st;
2174
2175			st = per_cpu_ptr(tbl->stats, cpu);
2176			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
2177			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
2178			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
2179			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
2180			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
2181			ndst.ndts_hits			+= READ_ONCE(st->hits);
2182			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
2183			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
2184			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
2185			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
2186			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
2187		}
2188
2189		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2190				  NDTA_PAD))
2191			goto nla_put_failure;
2192	}
2193
2194	BUG_ON(tbl->parms.dev);
2195	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2196		goto nla_put_failure;
2197
2198	read_unlock_bh(&tbl->lock);
2199	nlmsg_end(skb, nlh);
2200	return 0;
2201
2202nla_put_failure:
2203	read_unlock_bh(&tbl->lock);
2204	nlmsg_cancel(skb, nlh);
2205	return -EMSGSIZE;
2206}
2207
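/* Like neightbl_fill_info(), but emits only the table name plus a single
 * parms set; used when dumping the additional per-device parameter entries.
 */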
2208static int neightbl_fill_param_info(struct sk_buff *skb,
2209				    struct neigh_table *tbl,
2210				    struct neigh_parms *parms,
2211				    u32 pid, u32 seq, int type,
2212				    unsigned int flags)
2213{
2214	struct ndtmsg *ndtmsg;
2215	struct nlmsghdr *nlh;
2216
2217	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2218	if (nlh == NULL)
2219		return -EMSGSIZE;
2220
2221	ndtmsg = nlmsg_data(nlh);
2222
2223	read_lock_bh(&tbl->lock);
2224	ndtmsg->ndtm_family = tbl->family;
2225	ndtmsg->ndtm_pad1   = 0;
2226	ndtmsg->ndtm_pad2   = 0;
2227
2228	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2229	    neightbl_fill_parms(skb, parms) < 0)
2230		goto errout;
2231
2232	read_unlock_bh(&tbl->lock);
2233	nlmsg_end(skb, nlh);
2234	return 0;
2235errout:
2236	read_unlock_bh(&tbl->lock);
2237	nlmsg_cancel(skb, nlh);
2238	return -EMSGSIZE;
2239}
2240
2241static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2242	[NDTA_NAME]		= { .type = NLA_STRING },
2243	[NDTA_THRESH1]		= { .type = NLA_U32 },
2244	[NDTA_THRESH2]		= { .type = NLA_U32 },
2245	[NDTA_THRESH3]		= { .type = NLA_U32 },
2246	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2247	[NDTA_PARMS]		= { .type = NLA_NESTED },
2248};
2249
2250static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2251	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2252	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2253	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2254	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2255	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2256	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2257	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2258	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2259	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2260	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2261	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2262	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2263	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2264	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2265	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2266};
2267
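/* RTM_SETNEIGHTBL handler: look up the table by NDTA_NAME, then update the
 * requested per-device parms (NDTA_PARMS) and, for the initial namespace
 * only, the global gc_thresh/gc_interval values, all under tbl->lock.
 */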
2268static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2269			struct netlink_ext_ack *extack)
2270{
2271	struct net *net = sock_net(skb->sk);
2272	struct neigh_table *tbl;
2273	struct ndtmsg *ndtmsg;
2274	struct nlattr *tb[NDTA_MAX+1];
2275	bool found = false;
2276	int err, tidx;
2277
2278	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2279				     nl_neightbl_policy, extack);
2280	if (err < 0)
2281		goto errout;
2282
2283	if (tb[NDTA_NAME] == NULL) {
2284		err = -EINVAL;
2285		goto errout;
2286	}
2287
2288	ndtmsg = nlmsg_data(nlh);
2289
2290	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2291		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
2292		if (!tbl)
2293			continue;
2294		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2295			continue;
2296		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2297			found = true;
2298			break;
2299		}
2300	}
2301
2302	if (!found)
2303		return -ENOENT;
2304
2305	/*
2306	 * We acquire tbl->lock to be nice to the periodic timers and
2307	 * make sure they always see a consistent set of values.
2308	 */
2309	write_lock_bh(&tbl->lock);
2310
2311	if (tb[NDTA_PARMS]) {
2312		struct nlattr *tbp[NDTPA_MAX+1];
2313		struct neigh_parms *p;
2314		int i, ifindex = 0;
2315
2316		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2317						  tb[NDTA_PARMS],
2318						  nl_ntbl_parm_policy, extack);
2319		if (err < 0)
2320			goto errout_tbl_lock;
2321
2322		if (tbp[NDTPA_IFINDEX])
2323			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2324
2325		p = lookup_neigh_parms(tbl, net, ifindex);
2326		if (p == NULL) {
2327			err = -ENOENT;
2328			goto errout_tbl_lock;
2329		}
2330
2331		for (i = 1; i <= NDTPA_MAX; i++) {
2332			if (tbp[i] == NULL)
2333				continue;
2334
2335			switch (i) {
2336			case NDTPA_QUEUE_LEN:
2337				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2338					      nla_get_u32(tbp[i]) *
2339					      SKB_TRUESIZE(ETH_FRAME_LEN));
2340				break;
2341			case NDTPA_QUEUE_LENBYTES:
2342				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2343					      nla_get_u32(tbp[i]));
2344				break;
2345			case NDTPA_PROXY_QLEN:
2346				NEIGH_VAR_SET(p, PROXY_QLEN,
2347					      nla_get_u32(tbp[i]));
2348				break;
2349			case NDTPA_APP_PROBES:
2350				NEIGH_VAR_SET(p, APP_PROBES,
2351					      nla_get_u32(tbp[i]));
2352				break;
2353			case NDTPA_UCAST_PROBES:
2354				NEIGH_VAR_SET(p, UCAST_PROBES,
2355					      nla_get_u32(tbp[i]));
2356				break;
2357			case NDTPA_MCAST_PROBES:
2358				NEIGH_VAR_SET(p, MCAST_PROBES,
2359					      nla_get_u32(tbp[i]));
2360				break;
2361			case NDTPA_MCAST_REPROBES:
2362				NEIGH_VAR_SET(p, MCAST_REPROBES,
2363					      nla_get_u32(tbp[i]));
2364				break;
2365			case NDTPA_BASE_REACHABLE_TIME:
2366				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2367					      nla_get_msecs(tbp[i]));
2368				/* update reachable_time as well; otherwise the change only
2369				 * takes effect the next time neigh_periodic_work decides to
2370				 * recompute it (which can be multiple minutes away)
2371				 */
2372				p->reachable_time =
2373					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2374				break;
2375			case NDTPA_GC_STALETIME:
2376				NEIGH_VAR_SET(p, GC_STALETIME,
2377					      nla_get_msecs(tbp[i]));
2378				break;
2379			case NDTPA_DELAY_PROBE_TIME:
2380				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2381					      nla_get_msecs(tbp[i]));
2382				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2383				break;
2384			case NDTPA_INTERVAL_PROBE_TIME_MS:
2385				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2386					      nla_get_msecs(tbp[i]));
2387				break;
2388			case NDTPA_RETRANS_TIME:
2389				NEIGH_VAR_SET(p, RETRANS_TIME,
2390					      nla_get_msecs(tbp[i]));
2391				break;
2392			case NDTPA_ANYCAST_DELAY:
2393				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2394					      nla_get_msecs(tbp[i]));
2395				break;
2396			case NDTPA_PROXY_DELAY:
2397				NEIGH_VAR_SET(p, PROXY_DELAY,
2398					      nla_get_msecs(tbp[i]));
2399				break;
2400			case NDTPA_LOCKTIME:
2401				NEIGH_VAR_SET(p, LOCKTIME,
2402					      nla_get_msecs(tbp[i]));
2403				break;
2404			}
2405		}
2406	}
2407
2408	err = -ENOENT;
2409	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2410	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2411	    !net_eq(net, &init_net))
2412		goto errout_tbl_lock;
2413
2414	if (tb[NDTA_THRESH1])
2415		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2416
2417	if (tb[NDTA_THRESH2])
2418		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2419
2420	if (tb[NDTA_THRESH3])
2421		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2422
2423	if (tb[NDTA_GC_INTERVAL])
2424		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2425
2426	err = 0;
2427
2428errout_tbl_lock:
2429	write_unlock_bh(&tbl->lock);
2430errout:
2431	return err;
2432}
2433
2434static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2435				    struct netlink_ext_ack *extack)
2436{
2437	struct ndtmsg *ndtm;
2438
2439	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2440		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2441		return -EINVAL;
2442	}
2443
2444	ndtm = nlmsg_data(nlh);
2445	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2446		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2447		return -EINVAL;
2448	}
2449
2450	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2451		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2452		return -EINVAL;
2453	}
2454
2455	return 0;
2456}
2457
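/* RTM_GETNEIGHTBL dump: walk all registered tables and, for each, its list
 * of per-device parms.  cb->args[0]/[1] record the table and parms index so
 * an interrupted dump can resume where it left off.
 */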
2458static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2459{
2460	const struct nlmsghdr *nlh = cb->nlh;
2461	struct net *net = sock_net(skb->sk);
2462	int family, tidx, nidx = 0;
2463	int tbl_skip = cb->args[0];
2464	int neigh_skip = cb->args[1];
2465	struct neigh_table *tbl;
2466
2467	if (cb->strict_check) {
2468		int err = neightbl_valid_dump_info(nlh, cb->extack);
2469
2470		if (err < 0)
2471			return err;
2472	}
2473
2474	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2475
2476	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2477		struct neigh_parms *p;
2478
2479		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
2480		if (!tbl)
2481			continue;
2482
2483		if (tidx < tbl_skip || (family && tbl->family != family))
2484			continue;
2485
2486		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2487				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2488				       NLM_F_MULTI) < 0)
2489			break;
2490
2491		nidx = 0;
2492		p = list_next_entry(&tbl->parms, list);
2493		list_for_each_entry_from(p, &tbl->parms_list, list) {
2494			if (!net_eq(neigh_parms_net(p), net))
2495				continue;
2496
2497			if (nidx < neigh_skip)
2498				goto next;
2499
2500			if (neightbl_fill_param_info(skb, tbl, p,
2501						     NETLINK_CB(cb->skb).portid,
2502						     nlh->nlmsg_seq,
2503						     RTM_NEWNEIGHTBL,
2504						     NLM_F_MULTI) < 0)
2505				goto out;
2506		next:
2507			nidx++;
2508		}
2509
2510		neigh_skip = 0;
2511	}
2512out:
2513	cb->args[0] = tidx;
2514	cb->args[1] = nidx;
2515
2516	return skb->len;
2517}
2518
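/* Fill one netlink neighbour message for an entry: ndmsg header, NDA_DST,
 * the link-layer address (snapshotted under neigh->lock when the state is
 * NUD_VALID), cache info, probe count, protocol and extended flags.
 */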
2519static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2520			   u32 pid, u32 seq, int type, unsigned int flags)
2521{
2522	u32 neigh_flags, neigh_flags_ext;
2523	unsigned long now = jiffies;
2524	struct nda_cacheinfo ci;
2525	struct nlmsghdr *nlh;
2526	struct ndmsg *ndm;
2527
2528	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2529	if (nlh == NULL)
2530		return -EMSGSIZE;
2531
2532	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2533	neigh_flags     = neigh->flags & NTF_OLD_MASK;
2534
2535	ndm = nlmsg_data(nlh);
2536	ndm->ndm_family	 = neigh->ops->family;
2537	ndm->ndm_pad1    = 0;
2538	ndm->ndm_pad2    = 0;
2539	ndm->ndm_flags	 = neigh_flags;
2540	ndm->ndm_type	 = neigh->type;
2541	ndm->ndm_ifindex = neigh->dev->ifindex;
2542
2543	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2544		goto nla_put_failure;
2545
2546	read_lock_bh(&neigh->lock);
2547	ndm->ndm_state	 = neigh->nud_state;
2548	if (neigh->nud_state & NUD_VALID) {
2549		char haddr[MAX_ADDR_LEN];
2550
2551		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2552		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2553			read_unlock_bh(&neigh->lock);
2554			goto nla_put_failure;
2555		}
2556	}
2557
2558	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2559	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2560	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2561	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2562	read_unlock_bh(&neigh->lock);
2563
2564	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2565	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2566		goto nla_put_failure;
2567
2568	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2569		goto nla_put_failure;
2570	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2571		goto nla_put_failure;
2572
2573	nlmsg_end(skb, nlh);
2574	return 0;
2575
2576nla_put_failure:
2577	nlmsg_cancel(skb, nlh);
2578	return -EMSGSIZE;
2579}
2580
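/* Counterpart of neigh_fill_info() for proxy (pneigh) entries; these carry
 * no link-layer address or cache info and are reported with NTF_PROXY set
 * and state NUD_NONE.
 */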
2581static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2582			    u32 pid, u32 seq, int type, unsigned int flags,
2583			    struct neigh_table *tbl)
2584{
2585	u32 neigh_flags, neigh_flags_ext;
2586	struct nlmsghdr *nlh;
2587	struct ndmsg *ndm;
2588
2589	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2590	if (nlh == NULL)
2591		return -EMSGSIZE;
2592
2593	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2594	neigh_flags     = pn->flags & NTF_OLD_MASK;
2595
2596	ndm = nlmsg_data(nlh);
2597	ndm->ndm_family	 = tbl->family;
2598	ndm->ndm_pad1    = 0;
2599	ndm->ndm_pad2    = 0;
2600	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2601	ndm->ndm_type	 = RTN_UNICAST;
2602	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2603	ndm->ndm_state	 = NUD_NONE;
2604
2605	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2606		goto nla_put_failure;
2607
2608	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2609		goto nla_put_failure;
2610	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2611		goto nla_put_failure;
2612
2613	nlmsg_end(skb, nlh);
2614	return 0;
2615
2616nla_put_failure:
2617	nlmsg_cancel(skb, nlh);
2618	return -EMSGSIZE;
2619}
2620
2621static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2622{
2623	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2624	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2625}
2626
2627static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2628{
2629	struct net_device *master;
2630
2631	if (!master_idx)
2632		return false;
2633
2634	master = dev ? netdev_master_upper_dev_get_rcu(dev) : NULL;
2635
2636		/* 0 is already used to denote that NDA_MASTER wasn't passed, so we need
2637		 * another invalid ifindex value to denote "no master".
2638		 */
2639	if (master_idx == -1)
2640		return !!master;
2641
2642	if (!master || master->ifindex != master_idx)
2643		return true;
2644
2645	return false;
2646}
2647
2648static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2649{
2650	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2651		return true;
2652
2653	return false;
2654}
2655
2656struct neigh_dump_filter {
2657	int master_idx;
2658	int dev_idx;
2659};
2660
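/* Dump the neighbours of one table under RCU, applying the optional
 * ifindex/master filters.  cb->args[1]/[2] hold the hash bucket and the
 * index within it so a partial dump can be resumed.
 */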
2661static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2662			    struct netlink_callback *cb,
2663			    struct neigh_dump_filter *filter)
2664{
2665	struct net *net = sock_net(skb->sk);
2666	struct neighbour *n;
2667	int err = 0, h, s_h = cb->args[1];
2668	int idx, s_idx = idx = cb->args[2];
2669	struct neigh_hash_table *nht;
2670	unsigned int flags = NLM_F_MULTI;
2671
2672	if (filter->dev_idx || filter->master_idx)
2673		flags |= NLM_F_DUMP_FILTERED;
2674
2675	nht = rcu_dereference(tbl->nht);
2676
2677	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2678		if (h > s_h)
2679			s_idx = 0;
2680		idx = 0;
2681		neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[h]) {
2682			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2683				goto next;
2684			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2685			    neigh_master_filtered(n->dev, filter->master_idx))
2686				goto next;
2687			err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2688					      cb->nlh->nlmsg_seq,
2689					      RTM_NEWNEIGH, flags);
2690			if (err < 0)
2691				goto out;
2692next:
2693			idx++;
2694		}
2695	}
2696out:
2697	cb->args[1] = h;
2698	cb->args[2] = idx;
2699	return err;
2700}
2701
2702static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2703			     struct netlink_callback *cb,
2704			     struct neigh_dump_filter *filter)
2705{
2706	struct pneigh_entry *n;
2707	struct net *net = sock_net(skb->sk);
2708	int err = 0, h, s_h = cb->args[3];
2709	int idx, s_idx = idx = cb->args[4];
2710	unsigned int flags = NLM_F_MULTI;
2711
2712	if (filter->dev_idx || filter->master_idx)
2713		flags |= NLM_F_DUMP_FILTERED;
2714
2715	read_lock_bh(&tbl->lock);
2716
2717	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2718		if (h > s_h)
2719			s_idx = 0;
2720		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2721			if (idx < s_idx || pneigh_net(n) != net)
2722				goto next;
2723			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2724			    neigh_master_filtered(n->dev, filter->master_idx))
2725				goto next;
2726			err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2727					       cb->nlh->nlmsg_seq,
2728					       RTM_NEWNEIGH, flags, tbl);
2729			if (err < 0) {
2730				read_unlock_bh(&tbl->lock);
2731				goto out;
2732			}
2733		next:
2734			idx++;
2735		}
2736	}
2737
2738	read_unlock_bh(&tbl->lock);
2739out:
2740	cb->args[3] = h;
2741	cb->args[4] = idx;
2742	return err;
2743}
2744
2745static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2746				bool strict_check,
2747				struct neigh_dump_filter *filter,
2748				struct netlink_ext_ack *extack)
2749{
2750	struct nlattr *tb[NDA_MAX + 1];
2751	int err, i;
2752
2753	if (strict_check) {
2754		struct ndmsg *ndm;
2755
2756		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2757			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2758			return -EINVAL;
2759		}
2760
2761		ndm = nlmsg_data(nlh);
2762		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2763		    ndm->ndm_state || ndm->ndm_type) {
2764			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2765			return -EINVAL;
2766		}
2767
2768		if (ndm->ndm_flags & ~NTF_PROXY) {
2769			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2770			return -EINVAL;
2771		}
2772
2773		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2774						    tb, NDA_MAX, nda_policy,
2775						    extack);
2776	} else {
2777		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2778					     NDA_MAX, nda_policy, extack);
2779	}
2780	if (err < 0)
2781		return err;
2782
2783	for (i = 0; i <= NDA_MAX; ++i) {
2784		if (!tb[i])
2785			continue;
2786
2787		/* all new attributes should require strict_check */
2788		switch (i) {
2789		case NDA_IFINDEX:
2790			filter->dev_idx = nla_get_u32(tb[i]);
2791			break;
2792		case NDA_MASTER:
2793			filter->master_idx = nla_get_u32(tb[i]);
2794			break;
2795		default:
2796			if (strict_check) {
2797				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2798				return -EINVAL;
2799			}
2800		}
2801	}
2802
2803	return 0;
2804}
2805
2806static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2807{
2808	const struct nlmsghdr *nlh = cb->nlh;
2809	struct neigh_dump_filter filter = {};
2810	struct neigh_table *tbl;
2811	int t, family, s_t;
2812	int proxy = 0;
2813	int err;
2814
2815	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2816
2817	/* check for the presence of a full ndmsg structure; the family
2818	 * member is at the same offset in both structures
2819	 */
2820	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2821	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2822		proxy = 1;
2823
2824	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2825	if (err < 0 && cb->strict_check)
2826		return err;
2827	err = 0;
2828
2829	s_t = cb->args[0];
2830
2831	rcu_read_lock();
2832	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2833		tbl = rcu_dereference(neigh_tables[t]);
2834
2835		if (!tbl)
2836			continue;
2837		if (t < s_t || (family && tbl->family != family))
2838			continue;
2839		if (t > s_t)
2840			memset(&cb->args[1], 0, sizeof(cb->args) -
2841						sizeof(cb->args[0]));
2842		if (proxy)
2843			err = pneigh_dump_table(tbl, skb, cb, &filter);
2844		else
2845			err = neigh_dump_table(tbl, skb, cb, &filter);
2846		if (err < 0)
2847			break;
2848	}
2849	rcu_read_unlock();
2850
2851	cb->args[0] = t;
2852	return err;
2853}
2854
2855static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2856			       struct neigh_table **tbl,
2857			       void **dst, int *dev_idx, u8 *ndm_flags,
2858			       struct netlink_ext_ack *extack)
2859{
2860	struct nlattr *tb[NDA_MAX + 1];
2861	struct ndmsg *ndm;
2862	int err, i;
2863
2864	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2865		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2866		return -EINVAL;
2867	}
2868
2869	ndm = nlmsg_data(nlh);
2870	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2871	    ndm->ndm_type) {
2872		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2873		return -EINVAL;
2874	}
2875
2876	if (ndm->ndm_flags & ~NTF_PROXY) {
2877		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2878		return -EINVAL;
2879	}
2880
2881	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2882					    NDA_MAX, nda_policy, extack);
2883	if (err < 0)
2884		return err;
2885
2886	*ndm_flags = ndm->ndm_flags;
2887	*dev_idx = ndm->ndm_ifindex;
2888	*tbl = neigh_find_table(ndm->ndm_family);
2889	if (*tbl == NULL) {
2890		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2891		return -EAFNOSUPPORT;
2892	}
2893
2894	for (i = 0; i <= NDA_MAX; ++i) {
2895		if (!tb[i])
2896			continue;
2897
2898		switch (i) {
2899		case NDA_DST:
2900			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2901				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2902				return -EINVAL;
2903			}
2904			*dst = nla_data(tb[i]);
2905			break;
2906		default:
2907			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2908			return -EINVAL;
2909		}
2910	}
2911
2912	return 0;
2913}
2914
2915static inline size_t neigh_nlmsg_size(void)
2916{
2917	return NLMSG_ALIGN(sizeof(struct ndmsg))
2918	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2919	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2920	       + nla_total_size(sizeof(struct nda_cacheinfo))
2921	       + nla_total_size(4)  /* NDA_PROBES */
2922	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2923	       + nla_total_size(1); /* NDA_PROTOCOL */
2924}
2925
2926static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2927			   u32 pid, u32 seq)
2928{
2929	struct sk_buff *skb;
2930	int err = 0;
2931
2932	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2933	if (!skb)
2934		return -ENOBUFS;
2935
2936	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2937	if (err) {
2938		kfree_skb(skb);
2939		goto errout;
2940	}
2941
2942	err = rtnl_unicast(skb, net, pid);
2943errout:
2944	return err;
2945}
2946
2947static inline size_t pneigh_nlmsg_size(void)
2948{
2949	return NLMSG_ALIGN(sizeof(struct ndmsg))
2950	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2951	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2952	       + nla_total_size(1); /* NDA_PROTOCOL */
2953}
2954
2955static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2956			    u32 pid, u32 seq, struct neigh_table *tbl)
2957{
2958	struct sk_buff *skb;
2959	int err = 0;
2960
2961	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2962	if (!skb)
2963		return -ENOBUFS;
2964
2965	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2966	if (err) {
2967		kfree_skb(skb);
2968		goto errout;
2969	}
2970
2971	err = rtnl_unicast(skb, net, pid);
2972errout:
2973	return err;
2974}
2975
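/* RTM_GETNEIGH doit handler: validate the request, then answer with either
 * a proxy entry (NTF_PROXY set, device optional) or a regular neighbour
 * looked up by (table, destination, device).
 */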
2976static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2977		     struct netlink_ext_ack *extack)
2978{
2979	struct net *net = sock_net(in_skb->sk);
2980	struct net_device *dev = NULL;
2981	struct neigh_table *tbl = NULL;
2982	struct neighbour *neigh;
2983	void *dst = NULL;
2984	u8 ndm_flags = 0;
2985	int dev_idx = 0;
2986	int err;
2987
2988	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2989				  extack);
2990	if (err < 0)
2991		return err;
2992
2993	if (dev_idx) {
2994		dev = __dev_get_by_index(net, dev_idx);
2995		if (!dev) {
2996			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2997			return -ENODEV;
2998		}
2999	}
3000
3001	if (!dst) {
3002		NL_SET_ERR_MSG(extack, "Network address not specified");
3003		return -EINVAL;
3004	}
3005
3006	if (ndm_flags & NTF_PROXY) {
3007		struct pneigh_entry *pn;
3008
3009		pn = pneigh_lookup(tbl, net, dst, dev, 0);
3010		if (!pn) {
3011			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3012			return -ENOENT;
3013		}
3014		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3015					nlh->nlmsg_seq, tbl);
3016	}
3017
3018	if (!dev) {
3019		NL_SET_ERR_MSG(extack, "No device specified");
3020		return -EINVAL;
3021	}
3022
3023	neigh = neigh_lookup(tbl, dst, dev);
3024	if (!neigh) {
3025		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3026		return -ENOENT;
3027	}
3028
3029	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3030			      nlh->nlmsg_seq);
3031
3032	neigh_release(neigh);
3033
3034	return err;
3035}
3036
3037void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3038{
3039	int chain;
3040	struct neigh_hash_table *nht;
3041
3042	rcu_read_lock();
3043	nht = rcu_dereference(tbl->nht);
3044
3045	read_lock_bh(&tbl->lock); /* avoid resizes */
3046	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3047		struct neighbour *n;
3048
3049		neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
3050			cb(n, cookie);
3051	}
3052	read_unlock_bh(&tbl->lock);
3053	rcu_read_unlock();
3054}
3055EXPORT_SYMBOL(neigh_for_each);
3056
3057/* The tbl->lock must be held as a writer and BH disabled. */
3058void __neigh_for_each_release(struct neigh_table *tbl,
3059			      int (*cb)(struct neighbour *))
3060{
3061	struct neigh_hash_table *nht;
3062	int chain;
3063
3064	nht = rcu_dereference_protected(tbl->nht,
3065					lockdep_is_held(&tbl->lock));
3066	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3067		struct hlist_node *tmp;
3068		struct neighbour *n;
3069
3070		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[chain]) {
3071			int release;
3072
3073			write_lock(&n->lock);
3074			release = cb(n);
3075			if (release) {
3076				hlist_del_rcu(&n->hash);
3077				hlist_del_rcu(&n->dev_list);
3078				neigh_mark_dead(n);
3079			}
3080			write_unlock(&n->lock);
3081			if (release)
3082				neigh_cleanup_and_release(n);
3083		}
3084	}
3085}
3086EXPORT_SYMBOL(__neigh_for_each_release);
3087
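/* Transmit skb to addr using the neighbour table selected by index.  For
 * the address-family tables the entry is looked up (or created) under RCU
 * and its output method is used; NEIGH_LINK_TABLE bypasses resolution and
 * builds the hard header directly.  The skb is freed on error.
 */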
3088int neigh_xmit(int index, struct net_device *dev,
3089	       const void *addr, struct sk_buff *skb)
3090{
3091	int err = -EAFNOSUPPORT;
3092
3093	if (likely(index < NEIGH_NR_TABLES)) {
3094		struct neigh_table *tbl;
3095		struct neighbour *neigh;
3096
3097		rcu_read_lock();
3098		tbl = rcu_dereference(neigh_tables[index]);
3099		if (!tbl)
3100			goto out_unlock;
3101		if (index == NEIGH_ARP_TABLE) {
3102			u32 key = *((u32 *)addr);
3103
3104			neigh = __ipv4_neigh_lookup_noref(dev, key);
3105		} else {
3106			neigh = __neigh_lookup_noref(tbl, addr, dev);
3107		}
3108		if (!neigh)
3109			neigh = __neigh_create(tbl, addr, dev, false);
3110		err = PTR_ERR(neigh);
3111		if (IS_ERR(neigh)) {
3112			rcu_read_unlock();
3113			goto out_kfree_skb;
3114		}
3115		err = READ_ONCE(neigh->output)(neigh, skb);
3116out_unlock:
3117		rcu_read_unlock();
3118	}
3119	else if (index == NEIGH_LINK_TABLE) {
3120		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3121				      addr, NULL, skb->len);
3122		if (err < 0)
3123			goto out_kfree_skb;
3124		err = dev_queue_xmit(skb);
3125	}
3126out:
3127	return err;
3128out_kfree_skb:
3129	kfree_skb(skb);
3130	goto out;
3131}
3132EXPORT_SYMBOL(neigh_xmit);
3133
3134#ifdef CONFIG_PROC_FS
3135
3136static struct neighbour *neigh_get_valid(struct seq_file *seq,
3137					 struct neighbour *n,
3138					 loff_t *pos)
3139{
3140	struct neigh_seq_state *state = seq->private;
3141	struct net *net = seq_file_net(seq);
3142
3143	if (!net_eq(dev_net(n->dev), net))
3144		return NULL;
3145
3146	if (state->neigh_sub_iter) {
3147		loff_t fakep = 0;
3148		void *v;
3149
3150		v = state->neigh_sub_iter(state, n, pos ? pos : &fakep);
3151		if (!v)
3152			return NULL;
3153		if (pos)
3154			return v;
3155	}
3156
3157	if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3158		return n;
3159
3160	if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3161		return n;
3162
3163	return NULL;
3164}
3165
3166static struct neighbour *neigh_get_first(struct seq_file *seq)
3167{
3168	struct neigh_seq_state *state = seq->private;
3169	struct neigh_hash_table *nht = state->nht;
3170	struct neighbour *n, *tmp;
3171
3172	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3173
3174	while (++state->bucket < (1 << nht->hash_shift)) {
3175		neigh_for_each_in_bucket(n, &nht->hash_heads[state->bucket]) {
3176			tmp = neigh_get_valid(seq, n, NULL);
3177			if (tmp)
3178				return tmp;
3179		}
3180	}
3181
3182	return NULL;
3183}
3184
3185static struct neighbour *neigh_get_next(struct seq_file *seq,
3186					struct neighbour *n,
3187					loff_t *pos)
3188{
3189	struct neigh_seq_state *state = seq->private;
3190	struct neighbour *tmp;
3191
3192	if (state->neigh_sub_iter) {
3193		void *v = state->neigh_sub_iter(state, n, pos);
3194
3195		if (v)
3196			return n;
3197	}
3198
3199	hlist_for_each_entry_continue(n, hash) {
3200		tmp = neigh_get_valid(seq, n, pos);
3201		if (tmp) {
3202			n = tmp;
3203			goto out;
3204		}
3205	}
3206
3207	n = neigh_get_first(seq);
3208out:
3209	if (n && pos)
3210		--(*pos);
3211
3212	return n;
3213}
3214
3215static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3216{
3217	struct neighbour *n = neigh_get_first(seq);
3218
3219	if (n) {
3220		--(*pos);
3221		while (*pos) {
3222			n = neigh_get_next(seq, n, pos);
3223			if (!n)
3224				break;
3225		}
3226	}
3227	return *pos ? NULL : n;
3228}
3229
3230static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3231{
3232	struct neigh_seq_state *state = seq->private;
3233	struct net *net = seq_file_net(seq);
3234	struct neigh_table *tbl = state->tbl;
3235	struct pneigh_entry *pn = NULL;
3236	int bucket;
3237
3238	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3239	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3240		pn = tbl->phash_buckets[bucket];
3241		while (pn && !net_eq(pneigh_net(pn), net))
3242			pn = pn->next;
3243		if (pn)
3244			break;
3245	}
3246	state->bucket = bucket;
3247
3248	return pn;
3249}
3250
3251static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3252					    struct pneigh_entry *pn,
3253					    loff_t *pos)
3254{
3255	struct neigh_seq_state *state = seq->private;
3256	struct net *net = seq_file_net(seq);
3257	struct neigh_table *tbl = state->tbl;
3258
3259	do {
3260		pn = pn->next;
3261	} while (pn && !net_eq(pneigh_net(pn), net));
3262
3263	while (!pn) {
3264		if (++state->bucket > PNEIGH_HASHMASK)
3265			break;
3266		pn = tbl->phash_buckets[state->bucket];
3267		while (pn && !net_eq(pneigh_net(pn), net))
3268			pn = pn->next;
3269		if (pn)
3270			break;
3271	}
3272
3273	if (pn && pos)
3274		--(*pos);
3275
3276	return pn;
3277}
3278
3279static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3280{
3281	struct pneigh_entry *pn = pneigh_get_first(seq);
3282
3283	if (pn) {
3284		--(*pos);
3285		while (*pos) {
3286			pn = pneigh_get_next(seq, pn, pos);
3287			if (!pn)
3288				break;
3289		}
3290	}
3291	return *pos ? NULL : pn;
3292}
3293
3294static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3295{
3296	struct neigh_seq_state *state = seq->private;
3297	void *rc;
3298	loff_t idxpos = *pos;
3299
3300	rc = neigh_get_idx(seq, &idxpos);
3301	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3302		rc = pneigh_get_idx(seq, &idxpos);
3303
3304	return rc;
3305}
3306
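/* seq_file iterator used by the per-protocol /proc tables (e.g. ARP): walks
 * the main hash table first and then, unless NEIGH_SEQ_NEIGH_ONLY is set,
 * the proxy entries.  Holds rcu_read_lock and tbl->lock between start and
 * stop.
 */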
3307void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3308	__acquires(tbl->lock)
3309	__acquires(rcu)
3310{
3311	struct neigh_seq_state *state = seq->private;
3312
3313	state->tbl = tbl;
3314	state->bucket = -1;
3315	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3316
3317	rcu_read_lock();
3318	state->nht = rcu_dereference(tbl->nht);
3319	read_lock_bh(&tbl->lock);
3320
3321	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3322}
3323EXPORT_SYMBOL(neigh_seq_start);
3324
3325void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3326{
3327	struct neigh_seq_state *state;
3328	void *rc;
3329
3330	if (v == SEQ_START_TOKEN) {
3331		rc = neigh_get_first(seq);
3332		goto out;
3333	}
3334
3335	state = seq->private;
3336	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3337		rc = neigh_get_next(seq, v, NULL);
3338		if (rc)
3339			goto out;
3340		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3341			rc = pneigh_get_first(seq);
3342	} else {
3343		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3344		rc = pneigh_get_next(seq, v, NULL);
3345	}
3346out:
3347	++(*pos);
3348	return rc;
3349}
3350EXPORT_SYMBOL(neigh_seq_next);
3351
3352void neigh_seq_stop(struct seq_file *seq, void *v)
3353	__releases(tbl->lock)
3354	__releases(rcu)
3355{
3356	struct neigh_seq_state *state = seq->private;
3357	struct neigh_table *tbl = state->tbl;
3358
3359	read_unlock_bh(&tbl->lock);
3360	rcu_read_unlock();
3361}
3362EXPORT_SYMBOL(neigh_seq_stop);
3363
3364/* statistics via seq_file */
3365
3366static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3367{
3368	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3369	int cpu;
3370
3371	if (*pos == 0)
3372		return SEQ_START_TOKEN;
3373
3374	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3375		if (!cpu_possible(cpu))
3376			continue;
3377		*pos = cpu+1;
3378		return per_cpu_ptr(tbl->stats, cpu);
3379	}
3380	return NULL;
3381}
3382
3383static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3384{
3385	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3386	int cpu;
3387
3388	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3389		if (!cpu_possible(cpu))
3390			continue;
3391		*pos = cpu+1;
3392		return per_cpu_ptr(tbl->stats, cpu);
3393	}
3394	(*pos)++;
3395	return NULL;
3396}
3397
3398static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3399{
3400
3401}
3402
3403static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3404{
3405	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3406	struct neigh_statistics *st = v;
3407
3408	if (v == SEQ_START_TOKEN) {
3409		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3410		return 0;
3411	}
3412
3413	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3414			"%08lx         %08lx         %08lx         "
3415			"%08lx       %08lx            %08lx\n",
3416		   atomic_read(&tbl->entries),
3417
3418		   st->allocs,
3419		   st->destroys,
3420		   st->hash_grows,
3421
3422		   st->lookups,
3423		   st->hits,
3424
3425		   st->res_failed,
3426
3427		   st->rcv_probes_mcast,
3428		   st->rcv_probes_ucast,
3429
3430		   st->periodic_gc_runs,
3431		   st->forced_gc_runs,
3432		   st->unres_discards,
3433		   st->table_fulls
3434		   );
3435
3436	return 0;
3437}
3438
3439static const struct seq_operations neigh_stat_seq_ops = {
3440	.start	= neigh_stat_seq_start,
3441	.next	= neigh_stat_seq_next,
3442	.stop	= neigh_stat_seq_stop,
3443	.show	= neigh_stat_seq_show,
3444};
3445#endif /* CONFIG_PROC_FS */
3446
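/* Build a netlink message for the neighbour and multicast it to
 * RTNLGRP_NEIGH listeners; on allocation or fill failure the error is
 * reported via rtnl_set_sk_err() instead.
 */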
3447static void __neigh_notify(struct neighbour *n, int type, int flags,
3448			   u32 pid)
3449{
3450	struct sk_buff *skb;
3451	int err = -ENOBUFS;
3452	struct net *net;
3453
3454	rcu_read_lock();
3455	net = dev_net_rcu(n->dev);
3456	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3457	if (skb == NULL)
3458		goto errout;
3459
3460	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3461	if (err < 0) {
3462		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3463		WARN_ON(err == -EMSGSIZE);
3464		kfree_skb(skb);
3465		goto errout;
3466	}
3467	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3468	goto out;
3469errout:
3470	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3471out:
3472	rcu_read_unlock();
3473}
3474
3475void neigh_app_ns(struct neighbour *n)
3476{
3477	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3478}
3479EXPORT_SYMBOL(neigh_app_ns);
3480
3481#ifdef CONFIG_SYSCTL
3482static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3483
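/* sysctl handler for the legacy unres_qlen knob: the value is stored in
 * bytes (QUEUE_LEN_BYTES) but exposed in packets, so it is converted in
 * both directions using the truesize of an ETH_FRAME_LEN sized skb.
 */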
3484static int proc_unres_qlen(const struct ctl_table *ctl, int write,
3485			   void *buffer, size_t *lenp, loff_t *ppos)
3486{
3487	int size, ret;
3488	struct ctl_table tmp = *ctl;
3489
3490	tmp.extra1 = SYSCTL_ZERO;
3491	tmp.extra2 = &unres_qlen_max;
3492	tmp.data = &size;
3493
3494	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3495	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3496
3497	if (write && !ret)
3498		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3499	return ret;
3500}
3501
3502static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3503				  int index)
3504{
3505	struct net_device *dev;
3506	int family = neigh_parms_family(p);
3507
3508	rcu_read_lock();
3509	for_each_netdev_rcu(net, dev) {
3510		struct neigh_parms *dst_p =
3511				neigh_get_dev_parms_rcu(dev, family);
3512
3513		if (dst_p && !test_bit(index, dst_p->data_state))
3514			dst_p->data[index] = p->data[index];
3515	}
3516	rcu_read_unlock();
3517}
3518
3519static void neigh_proc_update(const struct ctl_table *ctl, int write)
3520{
3521	struct net_device *dev = ctl->extra1;
3522	struct neigh_parms *p = ctl->extra2;
3523	struct net *net = neigh_parms_net(p);
3524	int index = (int *) ctl->data - p->data;
3525
3526	if (!write)
3527		return;
3528
3529	set_bit(index, p->data_state);
3530	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3531		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3532	if (!dev) /* NULL dev means this is default value */
3533		neigh_copy_dflt_parms(net, p, index);
3534}
3535
3536static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
3537					   void *buffer, size_t *lenp,
3538					   loff_t *ppos)
3539{
3540	struct ctl_table tmp = *ctl;
3541	int ret;
3542
3543	tmp.extra1 = SYSCTL_ZERO;
3544	tmp.extra2 = SYSCTL_INT_MAX;
3545
3546	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3547	neigh_proc_update(ctl, write);
3548	return ret;
3549}
3550
3551static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
3552						   void *buffer, size_t *lenp, loff_t *ppos)
3553{
3554	struct ctl_table tmp = *ctl;
3555	int ret;
3556
3557	int min = msecs_to_jiffies(1);
3558
3559	tmp.extra1 = &min;
3560	tmp.extra2 = NULL;
3561
3562	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3563	neigh_proc_update(ctl, write);
3564	return ret;
3565}
3566
3567int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
3568			size_t *lenp, loff_t *ppos)
3569{
3570	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3571
3572	neigh_proc_update(ctl, write);
3573	return ret;
3574}
3575EXPORT_SYMBOL(neigh_proc_dointvec);
3576
3577int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
3578				size_t *lenp, loff_t *ppos)
3579{
3580	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3581
3582	neigh_proc_update(ctl, write);
3583	return ret;
3584}
3585EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3586
3587static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
3588					      void *buffer, size_t *lenp,
3589					      loff_t *ppos)
3590{
3591	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3592
3593	neigh_proc_update(ctl, write);
3594	return ret;
3595}
3596
3597int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
3598				   void *buffer, size_t *lenp, loff_t *ppos)
3599{
3600	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3601
3602	neigh_proc_update(ctl, write);
3603	return ret;
3604}
3605EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3606
3607static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
3608					  void *buffer, size_t *lenp,
3609					  loff_t *ppos)
3610{
3611	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3612
3613	neigh_proc_update(ctl, write);
3614	return ret;
3615}
3616
3617static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
3618					  void *buffer, size_t *lenp,
3619					  loff_t *ppos)
3620{
3621	struct neigh_parms *p = ctl->extra2;
3622	int ret;
3623
3624	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3625		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3626	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3627		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3628	else
3629		ret = -1;
3630
3631	if (write && ret == 0) {
3632		/* update reachable_time as well; otherwise the change only
3633		 * takes effect the next time neigh_periodic_work decides to
3634		 * recompute it
3635		 */
3636		p->reachable_time =
3637			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3638	}
3639	return ret;
3640}
3641
3642#define NEIGH_PARMS_DATA_OFFSET(index)	\
3643	(&((struct neigh_parms *) 0)->data[index])
3644
3645#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3646	[NEIGH_VAR_ ## attr] = { \
3647		.procname	= name, \
3648		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3649		.maxlen		= sizeof(int), \
3650		.mode		= mval, \
3651		.proc_handler	= proc, \
3652	}
3653
3654#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3655	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3656
3657#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3658	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3659
3660#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3661	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3662
3663#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3664	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3665
3666#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3667	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3668
3669#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3670	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3671
3672static struct neigh_sysctl_table {
3673	struct ctl_table_header *sysctl_header;
3674	struct ctl_table neigh_vars[NEIGH_VAR_MAX];
3675} neigh_sysctl_template __read_mostly = {
3676	.neigh_vars = {
3677		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3678		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3679		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3680		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3681		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3682		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3683		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3684		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3685						       "interval_probe_time_ms"),
3686		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3687		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3688		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3689		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3690		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3691		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3692		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3693		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3694		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3695		[NEIGH_VAR_GC_INTERVAL] = {
3696			.procname	= "gc_interval",
3697			.maxlen		= sizeof(int),
3698			.mode		= 0644,
3699			.proc_handler	= proc_dointvec_jiffies,
3700		},
3701		[NEIGH_VAR_GC_THRESH1] = {
3702			.procname	= "gc_thresh1",
3703			.maxlen		= sizeof(int),
3704			.mode		= 0644,
3705			.extra1		= SYSCTL_ZERO,
3706			.extra2		= SYSCTL_INT_MAX,
3707			.proc_handler	= proc_dointvec_minmax,
3708		},
3709		[NEIGH_VAR_GC_THRESH2] = {
3710			.procname	= "gc_thresh2",
3711			.maxlen		= sizeof(int),
3712			.mode		= 0644,
3713			.extra1		= SYSCTL_ZERO,
3714			.extra2		= SYSCTL_INT_MAX,
3715			.proc_handler	= proc_dointvec_minmax,
3716		},
3717		[NEIGH_VAR_GC_THRESH3] = {
3718			.procname	= "gc_thresh3",
3719			.maxlen		= sizeof(int),
3720			.mode		= 0644,
3721			.extra1		= SYSCTL_ZERO,
3722			.extra2		= SYSCTL_INT_MAX,
3723			.proc_handler	= proc_dointvec_minmax,
3724		},
3725	},
3726};
3727
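/* Register the per-device (or per-table default) sysctl tree under
 * net/<ipv4|ipv6>/neigh/<dev|default>/.  The template is duplicated, its
 * .data pointers are rebased onto this parms instance, and callers may pass
 * a handler to override the retrans/base_reachable_time entries.
 */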
3728int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3729			  proc_handler *handler)
3730{
3731	int i;
3732	struct neigh_sysctl_table *t;
3733	const char *dev_name_source;
3734	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3735	char *p_name;
3736	size_t neigh_vars_size;
3737
3738	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3739	if (!t)
3740		goto err;
3741
3742	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3743		t->neigh_vars[i].data += (long) p;
3744		t->neigh_vars[i].extra1 = dev;
3745		t->neigh_vars[i].extra2 = p;
3746	}
3747
3748	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
3749	if (dev) {
3750		dev_name_source = dev->name;
3751		/* Terminate the table early */
3752		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
3753	} else {
3754		struct neigh_table *tbl = p->tbl;
3755		dev_name_source = "default";
3756		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3757		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3758		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3759		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3760	}
3761
3762	if (handler) {
3763		/* RetransTime */
3764		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3765		/* ReachableTime */
3766		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3767		/* RetransTime (in milliseconds)*/
3768		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3769		/* ReachableTime (in milliseconds) */
3770		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3771	} else {
3772		/* These handlers update p->reachable_time after
3773		 * base_reachable_time(_ms) is set, so the new interval is applied
3774		 * from the next neighbour update instead of waiting for
3775		 * neigh_periodic_work to recompute it (which can take multiple
3776		 * minutes).  Any handler that replaces them should do the same.
3777		 */
3778		/* ReachableTime */
3779		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3780			neigh_proc_base_reachable_time;
3781		/* ReachableTime (in milliseconds) */
3782		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3783			neigh_proc_base_reachable_time;
3784	}
3785
3786	switch (neigh_parms_family(p)) {
3787	case AF_INET:
3788	      p_name = "ipv4";
3789	      break;
3790	case AF_INET6:
3791	      p_name = "ipv6";
3792	      break;
3793	default:
3794	      BUG();
3795	}
3796
3797	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3798		p_name, dev_name_source);
3799	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
3800						  neigh_path, t->neigh_vars,
3801						  neigh_vars_size);
3802	if (!t->sysctl_header)
3803		goto free;
3804
3805	p->sysctl_table = t;
3806	return 0;
3807
3808free:
3809	kfree(t);
3810err:
3811	return -ENOBUFS;
3812}
3813EXPORT_SYMBOL(neigh_sysctl_register);
3814
3815void neigh_sysctl_unregister(struct neigh_parms *p)
3816{
3817	if (p->sysctl_table) {
3818		struct neigh_sysctl_table *t = p->sysctl_table;
3819		p->sysctl_table = NULL;
3820		unregister_net_sysctl_table(t->sysctl_header);
3821		kfree(t);
3822	}
3823}
3824EXPORT_SYMBOL(neigh_sysctl_unregister);
3825
3826#endif	/* CONFIG_SYSCTL */
3827
3828static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
3829	{.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
3830	{.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
3831	{.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
3832	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
3833	{.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
3834	{.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
3835};
3836
3837static int __init neigh_init(void)
3838{
3839	rtnl_register_many(neigh_rtnl_msg_handlers);
3840	return 0;
3841}
3842
3843subsys_initcall(neigh_init);