// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send anything to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
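
/*
 * Illustrative sketch of the rule above (documentation only, not part of
 * the build): a caller that needs to do real work with an entry found in
 * a bucket takes a reference under the table lock, drops the lock, and
 * only then calls out into drivers or the network:
 *
 *	read_lock_bh(&tbl->lock);
 *	n = ...lookup in hash bucket...;
 *	if (n)
 *		neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *	if (n) {
 *		...send packets, call into drivers...;
 *		neigh_release(n);
 *	}
 */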

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
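
/*
 * Worked example (documentation only): for base = 30 * HZ the result is
 * uniform in [base/2, base/2 + base), i.e. reachable_time is drawn from
 * [15 s, 45 s), averaging the 30 s base value.
 */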

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
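
/*
 * Worked example (documentation only): for the initial shift of 3 used by
 * neigh_table_init() below, size is (1 << 3) * sizeof(void *) = 64 bytes
 * on 64-bit, so the bucket array comes from kzalloc(); the
 * __get_free_pages() path is only taken once the table has grown past
 * PAGE_SIZE worth of bucket pointers.
 */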

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
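
/*
 * Worked example (documentation only): the last 32 bits of the key are
 * xor-folded down to 4 bits, so proxy entries always land in one of 16
 * chains. E.g. for hash_val = 0x12345678:
 *
 *	0x12345678 ^ (0x12345678 >> 16) = 0x1234444c
 *	0x1234444c ^ (0x1234444c >>  8) = 0x12267008
 *	0x12267008 ^ (0x12267008 >>  4) = 0x13041708
 *	0x13041708 & 0xF                = 0x8
 */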

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with the neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with the neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
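
/*
 * In other words, n->output is the per-entry dispatch point: while the
 * entry is "connected", a transmit via n->output(n, skb) takes the cheap
 * ops->connected_output path, and as soon as the state becomes suspect it
 * is silently rerouted through the resolving ops->output path (see
 * neigh_resolve_output() and neigh_connected_output() below).
 */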

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
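
/*
 * Worked example (assuming the common IPv4 defaults of ucast_solicit = 3,
 * app_solicit = 0, mcast_solicit = 3 and mcast_resolicit = 0): an entry
 * resolving in NUD_INCOMPLETE gives up after 3 + 0 + 3 = 6 probes, while
 * an entry re-verifying in NUD_PROBE gives up after 3 + 0 + 0 = 3.
 */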

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very delicate place. report_unreachable is a very
	   complicated routine; in particular, it can hit the same
	   neighbour entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
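
/*
 * Summary of the timer-driven NUD transitions implemented above:
 *
 *	REACHABLE -- confirmation expired, recently used ---> DELAY
 *	REACHABLE -- confirmation expired, idle ------------> STALE
 *	DELAY ------ confirmed within delay_probe_time -----> REACHABLE
 *	DELAY ------ not confirmed in time -----------------> PROBE
 *	PROBE/INCOMPLETE -- neigh_max_probes() exceeded ----> FAILED
 */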

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		goto out;
	}

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * receive an ARP packet, even if it doesn't change the IP to MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
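
/*
 * Illustrative sketch (documentation only, not taken from this file): an
 * administrative update that unconditionally installs a new link-layer
 * address uses the same flag combination the netlink handlers below pass
 * in:
 *
 *	neigh_update(n, new_lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 */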

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* Initializes the hh_cache entry; takes n->lock for writing itself. */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
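
/*
 * The three output helpers above form a ladder of decreasing cost:
 * neigh_resolve_output() may queue the skb and kick off resolution,
 * neigh_connected_output() trusts the cached lladdr but still builds the
 * hardware header per packet, and neigh_direct_output() hands the skb
 * straight to the device for media that need no neighbour resolution.
 */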

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
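
/*
 * For reference, a protocol registers its table once at init time: IPv4
 * ARP calls neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl) from arp_init(),
 * with tbl->hash, tbl->constructor etc. filled in beforehand.
 */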

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
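
/*
 * For example, "ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0"
 * arrives as an RTM_NEWNEIGH message carrying NDA_DST and NDA_LLADDR
 * attributes; they are validated against this policy before neigh_add()
 * below acts on them.
 */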

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	neigh_release(neigh);

out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2177 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2178};
2179
2180static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2181 struct netlink_ext_ack *extack)
2182{
2183 struct net *net = sock_net(skb->sk);
2184 struct neigh_table *tbl;
2185 struct ndtmsg *ndtmsg;
2186 struct nlattr *tb[NDTA_MAX+1];
2187 bool found = false;
2188 int err, tidx;
2189
2190 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2191 nl_neightbl_policy, extack);
2192 if (err < 0)
2193 goto errout;
2194
2195 if (tb[NDTA_NAME] == NULL) {
2196 err = -EINVAL;
2197 goto errout;
2198 }
2199
2200 ndtmsg = nlmsg_data(nlh);
2201
2202 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2203 tbl = neigh_tables[tidx];
2204 if (!tbl)
2205 continue;
2206 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2207 continue;
2208 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2209 found = true;
2210 break;
2211 }
2212 }
2213
2214 if (!found)
2215 return -ENOENT;
2216
2217 /*
2218 * We acquire tbl->lock to be nice to the periodic timers and
2219 * make sure they always see a consistent set of values.
2220 */
2221 write_lock_bh(&tbl->lock);
2222
2223 if (tb[NDTA_PARMS]) {
2224 struct nlattr *tbp[NDTPA_MAX+1];
2225 struct neigh_parms *p;
2226 int i, ifindex = 0;
2227
2228 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2229 tb[NDTA_PARMS],
2230 nl_ntbl_parm_policy, extack);
2231 if (err < 0)
2232 goto errout_tbl_lock;
2233
2234 if (tbp[NDTPA_IFINDEX])
2235 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2236
2237 p = lookup_neigh_parms(tbl, net, ifindex);
2238 if (p == NULL) {
2239 err = -ENOENT;
2240 goto errout_tbl_lock;
2241 }
2242
2243 for (i = 1; i <= NDTPA_MAX; i++) {
2244 if (tbp[i] == NULL)
2245 continue;
2246
2247 switch (i) {
2248 case NDTPA_QUEUE_LEN:
2249 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2250 nla_get_u32(tbp[i]) *
2251 SKB_TRUESIZE(ETH_FRAME_LEN));
2252 break;
2253 case NDTPA_QUEUE_LENBYTES:
2254 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2255 nla_get_u32(tbp[i]));
2256 break;
2257 case NDTPA_PROXY_QLEN:
2258 NEIGH_VAR_SET(p, PROXY_QLEN,
2259 nla_get_u32(tbp[i]));
2260 break;
2261 case NDTPA_APP_PROBES:
2262 NEIGH_VAR_SET(p, APP_PROBES,
2263 nla_get_u32(tbp[i]));
2264 break;
2265 case NDTPA_UCAST_PROBES:
2266 NEIGH_VAR_SET(p, UCAST_PROBES,
2267 nla_get_u32(tbp[i]));
2268 break;
2269 case NDTPA_MCAST_PROBES:
2270 NEIGH_VAR_SET(p, MCAST_PROBES,
2271 nla_get_u32(tbp[i]));
2272 break;
2273 case NDTPA_MCAST_REPROBES:
2274 NEIGH_VAR_SET(p, MCAST_REPROBES,
2275 nla_get_u32(tbp[i]));
2276 break;
2277 case NDTPA_BASE_REACHABLE_TIME:
2278 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2279 nla_get_msecs(tbp[i]));
2280				/* Update reachable_time as well; otherwise the change
2281				 * only takes effect when neigh_periodic_work next
2282				 * recomputes it (which can be multiple minutes away)
2283				 */
2284 p->reachable_time =
2285 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2286 break;
2287 case NDTPA_GC_STALETIME:
2288 NEIGH_VAR_SET(p, GC_STALETIME,
2289 nla_get_msecs(tbp[i]));
2290 break;
2291 case NDTPA_DELAY_PROBE_TIME:
2292 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2293 nla_get_msecs(tbp[i]));
2294 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2295 break;
2296 case NDTPA_RETRANS_TIME:
2297 NEIGH_VAR_SET(p, RETRANS_TIME,
2298 nla_get_msecs(tbp[i]));
2299 break;
2300 case NDTPA_ANYCAST_DELAY:
2301 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2302 nla_get_msecs(tbp[i]));
2303 break;
2304 case NDTPA_PROXY_DELAY:
2305 NEIGH_VAR_SET(p, PROXY_DELAY,
2306 nla_get_msecs(tbp[i]));
2307 break;
2308 case NDTPA_LOCKTIME:
2309 NEIGH_VAR_SET(p, LOCKTIME,
2310 nla_get_msecs(tbp[i]));
2311 break;
2312 }
2313 }
2314 }
2315
2316 err = -ENOENT;
2317 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2318 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2319 !net_eq(net, &init_net))
2320 goto errout_tbl_lock;
2321
2322 if (tb[NDTA_THRESH1])
2323 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2324
2325 if (tb[NDTA_THRESH2])
2326 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2327
2328 if (tb[NDTA_THRESH3])
2329 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2330
2331 if (tb[NDTA_GC_INTERVAL])
2332 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2333
2334 err = 0;
2335
2336errout_tbl_lock:
2337 write_unlock_bh(&tbl->lock);
2338errout:
2339 return err;
2340}
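
/* Illustrative usage: iproute2's "ip ntable" front end sends the
 * RTM_SETNEIGHTBL messages handled above, e.g. (shown only as a sketch;
 * see iproute2 for the exact option syntax):
 *
 *	# ip ntable change name arp_cache thresh1 256 thresh3 2048
 *
 * Note the restriction above: gc_thresh[123]/gc_interval changes are
 * only accepted from the initial network namespace, while NDTA_PARMS
 * values may be tuned per namespace and per device.
 */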
2341
2342static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2343 struct netlink_ext_ack *extack)
2344{
2345 struct ndtmsg *ndtm;
2346
2347 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2348 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2349 return -EINVAL;
2350 }
2351
2352 ndtm = nlmsg_data(nlh);
2353 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2354 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2355 return -EINVAL;
2356 }
2357
2358 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2359 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2360 return -EINVAL;
2361 }
2362
2363 return 0;
2364}
2365
2366static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2367{
2368 const struct nlmsghdr *nlh = cb->nlh;
2369 struct net *net = sock_net(skb->sk);
2370 int family, tidx, nidx = 0;
2371 int tbl_skip = cb->args[0];
2372 int neigh_skip = cb->args[1];
2373 struct neigh_table *tbl;
2374
2375 if (cb->strict_check) {
2376 int err = neightbl_valid_dump_info(nlh, cb->extack);
2377
2378 if (err < 0)
2379 return err;
2380 }
2381
2382 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2383
2384 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2385 struct neigh_parms *p;
2386
2387 tbl = neigh_tables[tidx];
2388 if (!tbl)
2389 continue;
2390
2391 if (tidx < tbl_skip || (family && tbl->family != family))
2392 continue;
2393
2394 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2395 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2396 NLM_F_MULTI) < 0)
2397 break;
2398
2399 nidx = 0;
2400 p = list_next_entry(&tbl->parms, list);
2401 list_for_each_entry_from(p, &tbl->parms_list, list) {
2402 if (!net_eq(neigh_parms_net(p), net))
2403 continue;
2404
2405 if (nidx < neigh_skip)
2406 goto next;
2407
2408 if (neightbl_fill_param_info(skb, tbl, p,
2409 NETLINK_CB(cb->skb).portid,
2410 nlh->nlmsg_seq,
2411 RTM_NEWNEIGHTBL,
2412 NLM_F_MULTI) < 0)
2413 goto out;
2414 next:
2415 nidx++;
2416 }
2417
2418 neigh_skip = 0;
2419 }
2420out:
2421 cb->args[0] = tidx;
2422 cb->args[1] = nidx;
2423
2424 return skb->len;
2425}
2426
2427static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2428 u32 pid, u32 seq, int type, unsigned int flags)
2429{
2430 unsigned long now = jiffies;
2431 struct nda_cacheinfo ci;
2432 struct nlmsghdr *nlh;
2433 struct ndmsg *ndm;
2434
2435 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2436 if (nlh == NULL)
2437 return -EMSGSIZE;
2438
2439 ndm = nlmsg_data(nlh);
2440 ndm->ndm_family = neigh->ops->family;
2441 ndm->ndm_pad1 = 0;
2442 ndm->ndm_pad2 = 0;
2443 ndm->ndm_flags = neigh->flags;
2444 ndm->ndm_type = neigh->type;
2445 ndm->ndm_ifindex = neigh->dev->ifindex;
2446
2447 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2448 goto nla_put_failure;
2449
2450 read_lock_bh(&neigh->lock);
2451 ndm->ndm_state = neigh->nud_state;
2452 if (neigh->nud_state & NUD_VALID) {
2453 char haddr[MAX_ADDR_LEN];
2454
2455 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2456 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2457 read_unlock_bh(&neigh->lock);
2458 goto nla_put_failure;
2459 }
2460 }
2461
2462 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2463 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2464 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2465 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2466 read_unlock_bh(&neigh->lock);
2467
2468 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2469 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2470 goto nla_put_failure;
2471
2472 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2473 goto nla_put_failure;
2474
2475 nlmsg_end(skb, nlh);
2476 return 0;
2477
2478nla_put_failure:
2479 nlmsg_cancel(skb, nlh);
2480 return -EMSGSIZE;
2481}
2482
2483static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2484 u32 pid, u32 seq, int type, unsigned int flags,
2485 struct neigh_table *tbl)
2486{
2487 struct nlmsghdr *nlh;
2488 struct ndmsg *ndm;
2489
2490 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2491 if (nlh == NULL)
2492 return -EMSGSIZE;
2493
2494 ndm = nlmsg_data(nlh);
2495 ndm->ndm_family = tbl->family;
2496 ndm->ndm_pad1 = 0;
2497 ndm->ndm_pad2 = 0;
2498 ndm->ndm_flags = pn->flags | NTF_PROXY;
2499 ndm->ndm_type = RTN_UNICAST;
2500 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2501 ndm->ndm_state = NUD_NONE;
2502
2503 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2504 goto nla_put_failure;
2505
2506 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2507 goto nla_put_failure;
2508
2509 nlmsg_end(skb, nlh);
2510 return 0;
2511
2512nla_put_failure:
2513 nlmsg_cancel(skb, nlh);
2514 return -EMSGSIZE;
2515}
2516
2517static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2518{
2519 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2520 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2521}
2522
2523static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2524{
2525 struct net_device *master;
2526
2527 if (!master_idx)
2528 return false;
2529
2530 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2531 if (!master || master->ifindex != master_idx)
2532 return true;
2533
2534 return false;
2535}
2536
2537static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2538{
2539 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2540 return true;
2541
2542 return false;
2543}
2544
2545struct neigh_dump_filter {
2546 int master_idx;
2547 int dev_idx;
2548};
2549
2550static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2551 struct netlink_callback *cb,
2552 struct neigh_dump_filter *filter)
2553{
2554 struct net *net = sock_net(skb->sk);
2555 struct neighbour *n;
2556 int rc, h, s_h = cb->args[1];
2557 int idx, s_idx = idx = cb->args[2];
2558 struct neigh_hash_table *nht;
2559 unsigned int flags = NLM_F_MULTI;
2560
2561 if (filter->dev_idx || filter->master_idx)
2562 flags |= NLM_F_DUMP_FILTERED;
2563
2564 rcu_read_lock_bh();
2565 nht = rcu_dereference_bh(tbl->nht);
2566
2567 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2568 if (h > s_h)
2569 s_idx = 0;
2570 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2571 n != NULL;
2572 n = rcu_dereference_bh(n->next)) {
2573 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2574 goto next;
2575 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2576 neigh_master_filtered(n->dev, filter->master_idx))
2577 goto next;
2578 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2579 cb->nlh->nlmsg_seq,
2580 RTM_NEWNEIGH,
2581 flags) < 0) {
2582 rc = -1;
2583 goto out;
2584 }
2585next:
2586 idx++;
2587 }
2588 }
2589 rc = skb->len;
2590out:
2591 rcu_read_unlock_bh();
2592 cb->args[1] = h;
2593 cb->args[2] = idx;
2594 return rc;
2595}
2596
2597static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2598 struct netlink_callback *cb,
2599 struct neigh_dump_filter *filter)
2600{
2601 struct pneigh_entry *n;
2602 struct net *net = sock_net(skb->sk);
2603 int rc, h, s_h = cb->args[3];
2604 int idx, s_idx = idx = cb->args[4];
2605 unsigned int flags = NLM_F_MULTI;
2606
2607 if (filter->dev_idx || filter->master_idx)
2608 flags |= NLM_F_DUMP_FILTERED;
2609
2610 read_lock_bh(&tbl->lock);
2611
2612 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2613 if (h > s_h)
2614 s_idx = 0;
2615 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2616 if (idx < s_idx || pneigh_net(n) != net)
2617 goto next;
2618 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2619 neigh_master_filtered(n->dev, filter->master_idx))
2620 goto next;
2621 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2622 cb->nlh->nlmsg_seq,
2623 RTM_NEWNEIGH, flags, tbl) < 0) {
2624 read_unlock_bh(&tbl->lock);
2625 rc = -1;
2626 goto out;
2627 }
2628 next:
2629 idx++;
2630 }
2631 }
2632
2633 read_unlock_bh(&tbl->lock);
2634 rc = skb->len;
2635out:
2636 cb->args[3] = h;
2637 cb->args[4] = idx;
2638 return rc;
2640}
2641
2642static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2643 bool strict_check,
2644 struct neigh_dump_filter *filter,
2645 struct netlink_ext_ack *extack)
2646{
2647 struct nlattr *tb[NDA_MAX + 1];
2648 int err, i;
2649
2650 if (strict_check) {
2651 struct ndmsg *ndm;
2652
2653 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2654 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2655 return -EINVAL;
2656 }
2657
2658 ndm = nlmsg_data(nlh);
2659 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2660 ndm->ndm_state || ndm->ndm_type) {
2661 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2662 return -EINVAL;
2663 }
2664
2665 if (ndm->ndm_flags & ~NTF_PROXY) {
2666 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2667 return -EINVAL;
2668 }
2669
2670 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2671 tb, NDA_MAX, nda_policy,
2672 extack);
2673 } else {
2674 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2675 NDA_MAX, nda_policy, extack);
2676 }
2677 if (err < 0)
2678 return err;
2679
2680 for (i = 0; i <= NDA_MAX; ++i) {
2681 if (!tb[i])
2682 continue;
2683
2684 /* all new attributes should require strict_check */
2685 switch (i) {
2686 case NDA_IFINDEX:
2687 filter->dev_idx = nla_get_u32(tb[i]);
2688 break;
2689 case NDA_MASTER:
2690 filter->master_idx = nla_get_u32(tb[i]);
2691 break;
2692 default:
2693 if (strict_check) {
2694 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2695 return -EINVAL;
2696 }
2697 }
2698 }
2699
2700 return 0;
2701}
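
/* A request that passes the strict checks above is minimal by design
 * (sketch, not a complete netlink program): a bare ndmsg header with
 * only the family set, optionally followed by NDA_IFINDEX and/or
 * NDA_MASTER attributes for filtered dumps.
 *
 *	struct ndmsg ndm = {
 *		.ndm_family = AF_INET,	// all other fields left zero
 *	};
 */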
2702
2703static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2704{
2705 const struct nlmsghdr *nlh = cb->nlh;
2706 struct neigh_dump_filter filter = {};
2707 struct neigh_table *tbl;
2708 int t, family, s_t;
2709 int proxy = 0;
2710 int err;
2711
2712 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2713
2714	/* Check that a full ndmsg structure is present; the family member
2715	 * sits at the same offset in both ndmsg and rtgenmsg.
2716	 */
2717 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2718 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2719 proxy = 1;
2720
2721 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2722 if (err < 0 && cb->strict_check)
2723 return err;
2724
2725 s_t = cb->args[0];
2726
2727 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2728 tbl = neigh_tables[t];
2729
2730 if (!tbl)
2731 continue;
2732 if (t < s_t || (family && tbl->family != family))
2733 continue;
2734 if (t > s_t)
2735 memset(&cb->args[1], 0, sizeof(cb->args) -
2736 sizeof(cb->args[0]));
2737 if (proxy)
2738 err = pneigh_dump_table(tbl, skb, cb, &filter);
2739 else
2740 err = neigh_dump_table(tbl, skb, cb, &filter);
2741 if (err < 0)
2742 break;
2743 }
2744
2745 cb->args[0] = t;
2746 return skb->len;
2747}
2748
2749static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2750 struct neigh_table **tbl,
2751 void **dst, int *dev_idx, u8 *ndm_flags,
2752 struct netlink_ext_ack *extack)
2753{
2754 struct nlattr *tb[NDA_MAX + 1];
2755 struct ndmsg *ndm;
2756 int err, i;
2757
2758 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2759 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2760 return -EINVAL;
2761 }
2762
2763 ndm = nlmsg_data(nlh);
2764 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2765 ndm->ndm_type) {
2766 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2767 return -EINVAL;
2768 }
2769
2770 if (ndm->ndm_flags & ~NTF_PROXY) {
2771 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2772 return -EINVAL;
2773 }
2774
2775 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2776 NDA_MAX, nda_policy, extack);
2777 if (err < 0)
2778 return err;
2779
2780 *ndm_flags = ndm->ndm_flags;
2781 *dev_idx = ndm->ndm_ifindex;
2782 *tbl = neigh_find_table(ndm->ndm_family);
2783 if (*tbl == NULL) {
2784 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2785 return -EAFNOSUPPORT;
2786 }
2787
2788 for (i = 0; i <= NDA_MAX; ++i) {
2789 if (!tb[i])
2790 continue;
2791
2792 switch (i) {
2793 case NDA_DST:
2794 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2795 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2796 return -EINVAL;
2797 }
2798 *dst = nla_data(tb[i]);
2799 break;
2800 default:
2801 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2802 return -EINVAL;
2803 }
2804 }
2805
2806 return 0;
2807}
2808
2809static inline size_t neigh_nlmsg_size(void)
2810{
2811 return NLMSG_ALIGN(sizeof(struct ndmsg))
2812 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2813 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2814 + nla_total_size(sizeof(struct nda_cacheinfo))
2815 + nla_total_size(4) /* NDA_PROBES */
2816 + nla_total_size(1); /* NDA_PROTOCOL */
2817}
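
/* Sizing note (worked example): nla_total_size(payload) expands to
 * NLA_ALIGN(NLA_HDRLEN + payload), so the NDA_DST slot above reserves
 * NLA_ALIGN(4 + MAX_ADDR_LEN) bytes.  MAX_ADDR_LEN is a worst-case
 * bound; the payloads actually emitted by neigh_fill_info() are
 * tbl->key_len and dev->addr_len, which are usually smaller.
 */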
2818
2819static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2820 u32 pid, u32 seq)
2821{
2822 struct sk_buff *skb;
2823 int err = 0;
2824
2825 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2826 if (!skb)
2827 return -ENOBUFS;
2828
2829 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2830 if (err) {
2831 kfree_skb(skb);
2832 goto errout;
2833 }
2834
2835 err = rtnl_unicast(skb, net, pid);
2836errout:
2837 return err;
2838}
2839
2840static inline size_t pneigh_nlmsg_size(void)
2841{
2842 return NLMSG_ALIGN(sizeof(struct ndmsg))
2843 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2844 + nla_total_size(1); /* NDA_PROTOCOL */
2845}
2846
2847static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2848 u32 pid, u32 seq, struct neigh_table *tbl)
2849{
2850 struct sk_buff *skb;
2851 int err = 0;
2852
2853 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2854 if (!skb)
2855 return -ENOBUFS;
2856
2857 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2858 if (err) {
2859 kfree_skb(skb);
2860 goto errout;
2861 }
2862
2863 err = rtnl_unicast(skb, net, pid);
2864errout:
2865 return err;
2866}
2867
2868static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2869 struct netlink_ext_ack *extack)
2870{
2871 struct net *net = sock_net(in_skb->sk);
2872 struct net_device *dev = NULL;
2873 struct neigh_table *tbl = NULL;
2874 struct neighbour *neigh;
2875 void *dst = NULL;
2876 u8 ndm_flags = 0;
2877 int dev_idx = 0;
2878 int err;
2879
2880 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2881 extack);
2882 if (err < 0)
2883 return err;
2884
2885 if (dev_idx) {
2886 dev = __dev_get_by_index(net, dev_idx);
2887 if (!dev) {
2888 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2889 return -ENODEV;
2890 }
2891 }
2892
2893 if (!dst) {
2894 NL_SET_ERR_MSG(extack, "Network address not specified");
2895 return -EINVAL;
2896 }
2897
2898 if (ndm_flags & NTF_PROXY) {
2899 struct pneigh_entry *pn;
2900
2901 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2902 if (!pn) {
2903 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2904 return -ENOENT;
2905 }
2906 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2907 nlh->nlmsg_seq, tbl);
2908 }
2909
2910 if (!dev) {
2911 NL_SET_ERR_MSG(extack, "No device specified");
2912 return -EINVAL;
2913 }
2914
2915 neigh = neigh_lookup(tbl, dst, dev);
2916 if (!neigh) {
2917 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2918 return -ENOENT;
2919 }
2920
2921 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2922 nlh->nlmsg_seq);
2923
2924 neigh_release(neigh);
2925
2926 return err;
2927}
2928
2929void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2930{
2931 int chain;
2932 struct neigh_hash_table *nht;
2933
2934 rcu_read_lock_bh();
2935 nht = rcu_dereference_bh(tbl->nht);
2936
2937 read_lock(&tbl->lock); /* avoid resizes */
2938 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2939 struct neighbour *n;
2940
2941 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2942 n != NULL;
2943 n = rcu_dereference_bh(n->next))
2944 cb(n, cookie);
2945 }
2946 read_unlock(&tbl->lock);
2947 rcu_read_unlock_bh();
2948}
2949EXPORT_SYMBOL(neigh_for_each);
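
/* Usage sketch (illustrative, hypothetical callback name): counting the
 * entries of a table with a caller-supplied cookie.
 *
 *	static void count_cb(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_cb, &count);
 *
 * The callback runs under read_lock(&tbl->lock) inside an RCU-BH
 * read-side section, so it must not sleep or take tbl->lock itself.
 */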
2950
2951/* The caller must hold tbl->lock as a writer, with BH disabled. */
2952void __neigh_for_each_release(struct neigh_table *tbl,
2953 int (*cb)(struct neighbour *))
2954{
2955 int chain;
2956 struct neigh_hash_table *nht;
2957
2958 nht = rcu_dereference_protected(tbl->nht,
2959 lockdep_is_held(&tbl->lock));
2960 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2961 struct neighbour *n;
2962 struct neighbour __rcu **np;
2963
2964 np = &nht->hash_buckets[chain];
2965 while ((n = rcu_dereference_protected(*np,
2966 lockdep_is_held(&tbl->lock))) != NULL) {
2967 int release;
2968
2969 write_lock(&n->lock);
2970 release = cb(n);
2971 if (release) {
2972 rcu_assign_pointer(*np,
2973 rcu_dereference_protected(n->next,
2974 lockdep_is_held(&tbl->lock)));
2975 neigh_mark_dead(n);
2976 } else
2977 np = &n->next;
2978 write_unlock(&n->lock);
2979 if (release)
2980 neigh_cleanup_and_release(n);
2981 }
2982 }
2983}
2984EXPORT_SYMBOL(__neigh_for_each_release);
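
/* Usage sketch (illustrative, hypothetical callback name): release every
 * entry whose device is down, honouring the locking rule in the comment
 * above.
 *
 *	static int release_if_down(struct neighbour *n)
 *	{
 *		return !(n->dev->flags & IFF_UP);	// non-zero => unlink
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_if_down);
 *	write_unlock_bh(&tbl->lock);
 */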
2985
2986int neigh_xmit(int index, struct net_device *dev,
2987 const void *addr, struct sk_buff *skb)
2988{
2989 int err = -EAFNOSUPPORT;
2990 if (likely(index < NEIGH_NR_TABLES)) {
2991 struct neigh_table *tbl;
2992 struct neighbour *neigh;
2993
2994 tbl = neigh_tables[index];
2995 if (!tbl)
2996 goto out;
2997 rcu_read_lock_bh();
2998 if (index == NEIGH_ARP_TABLE) {
2999 u32 key = *((u32 *)addr);
3000
3001 neigh = __ipv4_neigh_lookup_noref(dev, key);
3002 } else {
3003 neigh = __neigh_lookup_noref(tbl, addr, dev);
3004 }
3005 if (!neigh)
3006 neigh = __neigh_create(tbl, addr, dev, false);
3007 err = PTR_ERR(neigh);
3008 if (IS_ERR(neigh)) {
3009 rcu_read_unlock_bh();
3010 goto out_kfree_skb;
3011 }
3012 err = neigh->output(neigh, skb);
3013 rcu_read_unlock_bh();
3014	} else if (index == NEIGH_LINK_TABLE) {
3016 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3017 addr, NULL, skb->len);
3018 if (err < 0)
3019 goto out_kfree_skb;
3020 err = dev_queue_xmit(skb);
3021 }
3022out:
3023 return err;
3024out_kfree_skb:
3025 kfree_skb(skb);
3026 goto out;
3027}
3028EXPORT_SYMBOL(neigh_xmit);
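
/* Usage sketch (illustrative): transmit an skb to an IPv4 next hop via
 * the ARP table, creating the neighbour on demand.  "nh" is a
 * hypothetical next-hop address in network byte order.
 *
 *	__be32 nh = ...;
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &nh, skb);
 *
 * If neighbour creation or header construction fails, the skb is freed
 * here (out_kfree_skb), so callers must not free it again on that path.
 */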
3029
3030#ifdef CONFIG_PROC_FS
3031
3032static struct neighbour *neigh_get_first(struct seq_file *seq)
3033{
3034 struct neigh_seq_state *state = seq->private;
3035 struct net *net = seq_file_net(seq);
3036 struct neigh_hash_table *nht = state->nht;
3037 struct neighbour *n = NULL;
3038 int bucket;
3039
3040 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3041 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3042 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3043
3044 while (n) {
3045 if (!net_eq(dev_net(n->dev), net))
3046 goto next;
3047 if (state->neigh_sub_iter) {
3048 loff_t fakep = 0;
3049 void *v;
3050
3051 v = state->neigh_sub_iter(state, n, &fakep);
3052 if (!v)
3053 goto next;
3054 }
3055 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3056 break;
3057 if (n->nud_state & ~NUD_NOARP)
3058 break;
3059next:
3060 n = rcu_dereference_bh(n->next);
3061 }
3062
3063 if (n)
3064 break;
3065 }
3066 state->bucket = bucket;
3067
3068 return n;
3069}
3070
3071static struct neighbour *neigh_get_next(struct seq_file *seq,
3072 struct neighbour *n,
3073 loff_t *pos)
3074{
3075 struct neigh_seq_state *state = seq->private;
3076 struct net *net = seq_file_net(seq);
3077 struct neigh_hash_table *nht = state->nht;
3078
3079 if (state->neigh_sub_iter) {
3080 void *v = state->neigh_sub_iter(state, n, pos);
3081 if (v)
3082 return n;
3083 }
3084 n = rcu_dereference_bh(n->next);
3085
3086 while (1) {
3087 while (n) {
3088 if (!net_eq(dev_net(n->dev), net))
3089 goto next;
3090 if (state->neigh_sub_iter) {
3091 void *v = state->neigh_sub_iter(state, n, pos);
3092 if (v)
3093 return n;
3094 goto next;
3095 }
3096 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3097 break;
3098
3099 if (n->nud_state & ~NUD_NOARP)
3100 break;
3101next:
3102 n = rcu_dereference_bh(n->next);
3103 }
3104
3105 if (n)
3106 break;
3107
3108 if (++state->bucket >= (1 << nht->hash_shift))
3109 break;
3110
3111 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3112 }
3113
3114 if (n && pos)
3115 --(*pos);
3116 return n;
3117}
3118
3119static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3120{
3121 struct neighbour *n = neigh_get_first(seq);
3122
3123 if (n) {
3124 --(*pos);
3125 while (*pos) {
3126 n = neigh_get_next(seq, n, pos);
3127 if (!n)
3128 break;
3129 }
3130 }
3131 return *pos ? NULL : n;
3132}
3133
3134static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3135{
3136 struct neigh_seq_state *state = seq->private;
3137 struct net *net = seq_file_net(seq);
3138 struct neigh_table *tbl = state->tbl;
3139 struct pneigh_entry *pn = NULL;
3140 int bucket = state->bucket;
3141
3142 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3143 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3144 pn = tbl->phash_buckets[bucket];
3145 while (pn && !net_eq(pneigh_net(pn), net))
3146 pn = pn->next;
3147 if (pn)
3148 break;
3149 }
3150 state->bucket = bucket;
3151
3152 return pn;
3153}
3154
3155static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3156 struct pneigh_entry *pn,
3157 loff_t *pos)
3158{
3159 struct neigh_seq_state *state = seq->private;
3160 struct net *net = seq_file_net(seq);
3161 struct neigh_table *tbl = state->tbl;
3162
3163 do {
3164 pn = pn->next;
3165 } while (pn && !net_eq(pneigh_net(pn), net));
3166
3167 while (!pn) {
3168 if (++state->bucket > PNEIGH_HASHMASK)
3169 break;
3170 pn = tbl->phash_buckets[state->bucket];
3171 while (pn && !net_eq(pneigh_net(pn), net))
3172 pn = pn->next;
3173 if (pn)
3174 break;
3175 }
3176
3177 if (pn && pos)
3178 --(*pos);
3179
3180 return pn;
3181}
3182
3183static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3184{
3185 struct pneigh_entry *pn = pneigh_get_first(seq);
3186
3187 if (pn) {
3188 --(*pos);
3189 while (*pos) {
3190 pn = pneigh_get_next(seq, pn, pos);
3191 if (!pn)
3192 break;
3193 }
3194 }
3195 return *pos ? NULL : pn;
3196}
3197
3198static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3199{
3200 struct neigh_seq_state *state = seq->private;
3201 void *rc;
3202 loff_t idxpos = *pos;
3203
3204 rc = neigh_get_idx(seq, &idxpos);
3205 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3206 rc = pneigh_get_idx(seq, &idxpos);
3207
3208 return rc;
3209}
3210
3211void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3212 __acquires(tbl->lock)
3213 __acquires(rcu_bh)
3214{
3215 struct neigh_seq_state *state = seq->private;
3216
3217 state->tbl = tbl;
3218 state->bucket = 0;
3219 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3220
3221 rcu_read_lock_bh();
3222 state->nht = rcu_dereference_bh(tbl->nht);
3223 read_lock(&tbl->lock);
3224
3225 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3226}
3227EXPORT_SYMBOL(neigh_seq_start);
3228
3229void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3230{
3231 struct neigh_seq_state *state;
3232 void *rc;
3233
3234 if (v == SEQ_START_TOKEN) {
3235 rc = neigh_get_first(seq);
3236 goto out;
3237 }
3238
3239 state = seq->private;
3240 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3241 rc = neigh_get_next(seq, v, NULL);
3242 if (rc)
3243 goto out;
3244 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3245 rc = pneigh_get_first(seq);
3246 } else {
3247 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3248 rc = pneigh_get_next(seq, v, NULL);
3249 }
3250out:
3251 ++(*pos);
3252 return rc;
3253}
3254EXPORT_SYMBOL(neigh_seq_next);
3255
3256void neigh_seq_stop(struct seq_file *seq, void *v)
3257 __releases(tbl->lock)
3258 __releases(rcu_bh)
3259{
3260 struct neigh_seq_state *state = seq->private;
3261 struct neigh_table *tbl = state->tbl;
3262
3263 read_unlock(&tbl->lock);
3264 rcu_read_unlock_bh();
3265}
3266EXPORT_SYMBOL(neigh_seq_stop);
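
/* Wiring sketch (illustrative, hypothetical foo_* names): a protocol
 * exposes its table through these helpers by wrapping neigh_seq_start()
 * with its own table and flags.
 *
 *	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &foo_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations foo_seq_ops = {
 *		.start	= foo_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= foo_seq_show,	// protocol-specific formatting
 *	};
 */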
3267
3268/* statistics via seq_file */
3269
3270static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3271{
3272 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3273 int cpu;
3274
3275 if (*pos == 0)
3276 return SEQ_START_TOKEN;
3277
3278 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3279 if (!cpu_possible(cpu))
3280 continue;
3281 *pos = cpu+1;
3282 return per_cpu_ptr(tbl->stats, cpu);
3283 }
3284 return NULL;
3285}
3286
3287static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3288{
3289 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3290 int cpu;
3291
3292 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3293 if (!cpu_possible(cpu))
3294 continue;
3295 *pos = cpu+1;
3296 return per_cpu_ptr(tbl->stats, cpu);
3297 }
3298 (*pos)++;
3299 return NULL;
3300}
3301
3302static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3303{
3305}
3306
3307static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3308{
3309 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3310 struct neigh_statistics *st = v;
3311
3312 if (v == SEQ_START_TOKEN) {
3313 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3314 return 0;
3315 }
3316
3317 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3318 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
3319 atomic_read(&tbl->entries),
3320
3321 st->allocs,
3322 st->destroys,
3323 st->hash_grows,
3324
3325 st->lookups,
3326 st->hits,
3327
3328 st->res_failed,
3329
3330 st->rcv_probes_mcast,
3331 st->rcv_probes_ucast,
3332
3333 st->periodic_gc_runs,
3334 st->forced_gc_runs,
3335 st->unres_discards,
3336 st->table_fulls
3337 );
3338
3339 return 0;
3340}
3341
3342static const struct seq_operations neigh_stat_seq_ops = {
3343 .start = neigh_stat_seq_start,
3344 .next = neigh_stat_seq_next,
3345 .stop = neigh_stat_seq_stop,
3346 .show = neigh_stat_seq_show,
3347};
3348#endif /* CONFIG_PROC_FS */
3349
3350static void __neigh_notify(struct neighbour *n, int type, int flags,
3351 u32 pid)
3352{
3353 struct net *net = dev_net(n->dev);
3354 struct sk_buff *skb;
3355 int err = -ENOBUFS;
3356
3357 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3358 if (skb == NULL)
3359 goto errout;
3360
3361 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3362 if (err < 0) {
3363 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3364 WARN_ON(err == -EMSGSIZE);
3365 kfree_skb(skb);
3366 goto errout;
3367 }
3368 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3369 return;
3370errout:
3371 if (err < 0)
3372 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3373}
3374
3375void neigh_app_ns(struct neighbour *n)
3376{
3377 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3378}
3379EXPORT_SYMBOL(neigh_app_ns);
3380
3381#ifdef CONFIG_SYSCTL
3382static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3383
3384static int proc_unres_qlen(struct ctl_table *ctl, int write,
3385 void *buffer, size_t *lenp, loff_t *ppos)
3386{
3387 int size, ret;
3388 struct ctl_table tmp = *ctl;
3389
3390 tmp.extra1 = SYSCTL_ZERO;
3391 tmp.extra2 = &unres_qlen_max;
3392 tmp.data = &size;
3393
3394 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3395 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3396
3397 if (write && !ret)
3398 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3399 return ret;
3400}
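
/* Worked example (illustrative sizes): the legacy "unres_qlen" knob is
 * expressed in packets while the backing QUEUE_LEN_BYTES variable is in
 * bytes, converted with SKB_TRUESIZE(ETH_FRAME_LEN) -- roughly 2 KiB on
 * a typical configuration.  Writing 100 here therefore stores about
 * 100 * 2 KiB, and a later read divides by the same factor, so small
 * rounding differences between the two views are expected.
 */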
3401
3402static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3403 int family)
3404{
3405 switch (family) {
3406 case AF_INET:
3407 return __in_dev_arp_parms_get_rcu(dev);
3408 case AF_INET6:
3409 return __in6_dev_nd_parms_get_rcu(dev);
3410 }
3411 return NULL;
3412}
3413
3414static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3415 int index)
3416{
3417 struct net_device *dev;
3418 int family = neigh_parms_family(p);
3419
3420 rcu_read_lock();
3421 for_each_netdev_rcu(net, dev) {
3422 struct neigh_parms *dst_p =
3423 neigh_get_dev_parms_rcu(dev, family);
3424
3425 if (dst_p && !test_bit(index, dst_p->data_state))
3426 dst_p->data[index] = p->data[index];
3427 }
3428 rcu_read_unlock();
3429}
3430
3431static void neigh_proc_update(struct ctl_table *ctl, int write)
3432{
3433 struct net_device *dev = ctl->extra1;
3434 struct neigh_parms *p = ctl->extra2;
3435 struct net *net = neigh_parms_net(p);
3436 int index = (int *) ctl->data - p->data;
3437
3438 if (!write)
3439 return;
3440
3441 set_bit(index, p->data_state);
3442 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3443 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3444	if (!dev) /* a NULL dev means these are the default values */
3445 neigh_copy_dflt_parms(net, p, index);
3446}
3447
3448static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3449 void *buffer, size_t *lenp,
3450 loff_t *ppos)
3451{
3452 struct ctl_table tmp = *ctl;
3453 int ret;
3454
3455 tmp.extra1 = SYSCTL_ZERO;
3456 tmp.extra2 = SYSCTL_INT_MAX;
3457
3458 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3459 neigh_proc_update(ctl, write);
3460 return ret;
3461}
3462
3463int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3464 size_t *lenp, loff_t *ppos)
3465{
3466 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3467
3468 neigh_proc_update(ctl, write);
3469 return ret;
3470}
3471EXPORT_SYMBOL(neigh_proc_dointvec);
3472
3473int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3474 size_t *lenp, loff_t *ppos)
3475{
3476 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3477
3478 neigh_proc_update(ctl, write);
3479 return ret;
3480}
3481EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3482
3483static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3484 void *buffer, size_t *lenp,
3485 loff_t *ppos)
3486{
3487 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3488
3489 neigh_proc_update(ctl, write);
3490 return ret;
3491}
3492
3493int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3494 void *buffer, size_t *lenp, loff_t *ppos)
3495{
3496 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3497
3498 neigh_proc_update(ctl, write);
3499 return ret;
3500}
3501EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3502
3503static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3504 void *buffer, size_t *lenp,
3505 loff_t *ppos)
3506{
3507 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3508
3509 neigh_proc_update(ctl, write);
3510 return ret;
3511}
3512
3513static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3514 void *buffer, size_t *lenp,
3515 loff_t *ppos)
3516{
3517 struct neigh_parms *p = ctl->extra2;
3518 int ret;
3519
3520 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3521 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3522 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3523 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3524 else
3525 ret = -1;
3526
3527 if (write && ret == 0) {
3528 /* update reachable_time as well, otherwise, the change will
3529 * only be effective after the next time neigh_periodic_work
3530 * decides to recompute it
3531 */
3532 p->reachable_time =
3533 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3534 }
3535 return ret;
3536}
3537
3538#define NEIGH_PARMS_DATA_OFFSET(index) \
3539 (&((struct neigh_parms *) 0)->data[index])
3540
3541#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3542 [NEIGH_VAR_ ## attr] = { \
3543 .procname = name, \
3544 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3545 .maxlen = sizeof(int), \
3546 .mode = mval, \
3547 .proc_handler = proc, \
3548 }
3549
3550#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3551 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3552
3553#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3554 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3555
3556#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3557 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3558
3559#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3560 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3561
3562#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3563 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
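
/* How the template rebasing works (sketch): NEIGH_PARMS_DATA_OFFSET
 * stores the offset of data[index] within a NULL neigh_parms as the
 * template's .data pointer -- the classic offsetof() trick.
 * neigh_sysctl_register() below then anchors each entry to a real parms
 * instance with
 *
 *	t->neigh_vars[i].data += (long) p;
 *
 * turning "offset of data[index]" into "&p->data[index]".
 */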
3564
3565static struct neigh_sysctl_table {
3566 struct ctl_table_header *sysctl_header;
3567 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3568} neigh_sysctl_template __read_mostly = {
3569 .neigh_vars = {
3570 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3571 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3572 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3573 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3574 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3575 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3576 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3577 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3578 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3579 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3580 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3581 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3582 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3583 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3584 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3585 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3586 [NEIGH_VAR_GC_INTERVAL] = {
3587 .procname = "gc_interval",
3588 .maxlen = sizeof(int),
3589 .mode = 0644,
3590 .proc_handler = proc_dointvec_jiffies,
3591 },
3592 [NEIGH_VAR_GC_THRESH1] = {
3593 .procname = "gc_thresh1",
3594 .maxlen = sizeof(int),
3595 .mode = 0644,
3596 .extra1 = SYSCTL_ZERO,
3597 .extra2 = SYSCTL_INT_MAX,
3598 .proc_handler = proc_dointvec_minmax,
3599 },
3600 [NEIGH_VAR_GC_THRESH2] = {
3601 .procname = "gc_thresh2",
3602 .maxlen = sizeof(int),
3603 .mode = 0644,
3604 .extra1 = SYSCTL_ZERO,
3605 .extra2 = SYSCTL_INT_MAX,
3606 .proc_handler = proc_dointvec_minmax,
3607 },
3608 [NEIGH_VAR_GC_THRESH3] = {
3609 .procname = "gc_thresh3",
3610 .maxlen = sizeof(int),
3611 .mode = 0644,
3612 .extra1 = SYSCTL_ZERO,
3613 .extra2 = SYSCTL_INT_MAX,
3614 .proc_handler = proc_dointvec_minmax,
3615 },
3616 {},
3617 },
3618};
3619
3620int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3621 proc_handler *handler)
3622{
3623 int i;
3624 struct neigh_sysctl_table *t;
3625 const char *dev_name_source;
3626 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3627 char *p_name;
3628
3629 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3630 if (!t)
3631 goto err;
3632
3633 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3634 t->neigh_vars[i].data += (long) p;
3635 t->neigh_vars[i].extra1 = dev;
3636 t->neigh_vars[i].extra2 = p;
3637 }
3638
3639 if (dev) {
3640 dev_name_source = dev->name;
3641 /* Terminate the table early */
3642 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3643 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3644 } else {
3645 struct neigh_table *tbl = p->tbl;
3646 dev_name_source = "default";
3647 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3648 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3649 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3650 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3651 }
3652
3653 if (handler) {
3654 /* RetransTime */
3655 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3656 /* ReachableTime */
3657 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3658		/* RetransTime (in milliseconds) */
3659 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3660 /* ReachableTime (in milliseconds) */
3661 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3662 } else {
3663		/* These handlers update p->reachable_time after
3664		 * base_reachable_time(_ms) is set, so the new interval applies
3665		 * from the next neighbour update instead of waiting for
3666		 * neigh_periodic_work to recompute it (which can take multiple
3667		 * minutes).  Any handler that replaces them should do the same.
3668		 */
3669 /* ReachableTime */
3670 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3671 neigh_proc_base_reachable_time;
3672 /* ReachableTime (in milliseconds) */
3673 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3674 neigh_proc_base_reachable_time;
3675 }
3676
3677 /* Don't export sysctls to unprivileged users */
3678 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3679 t->neigh_vars[0].procname = NULL;
3680
3681 switch (neigh_parms_family(p)) {
3682 case AF_INET:
3683 p_name = "ipv4";
3684 break;
3685 case AF_INET6:
3686 p_name = "ipv6";
3687 break;
3688 default:
3689 BUG();
3690 }
3691
3692 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3693 p_name, dev_name_source);
3694 t->sysctl_header =
3695 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3696 if (!t->sysctl_header)
3697 goto free;
3698
3699 p->sysctl_table = t;
3700 return 0;
3701
3702free:
3703 kfree(t);
3704err:
3705 return -ENOBUFS;
3706}
3707EXPORT_SYMBOL(neigh_sysctl_register);
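
/* Usage sketch (illustrative): protocols register per-device knobs under
 * net/<family>/neigh/<dev>/ when parms are attached to a device, and the
 * table defaults under net/<family>/neigh/default/ with a NULL dev; e.g.
 * (hypothetical call site)
 *
 *	err = neigh_sysctl_register(dev, p, NULL);
 *
 * A non-NULL handler overrides the retrans/base_reachable_time proc
 * handlers, as wired up above.
 */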
3708
3709void neigh_sysctl_unregister(struct neigh_parms *p)
3710{
3711 if (p->sysctl_table) {
3712 struct neigh_sysctl_table *t = p->sysctl_table;
3713 p->sysctl_table = NULL;
3714 unregister_net_sysctl_table(t->sysctl_header);
3715 kfree(t);
3716 }
3717}
3718EXPORT_SYMBOL(neigh_sysctl_unregister);
3719
3720#endif /* CONFIG_SYSCTL */
3721
3722static int __init neigh_init(void)
3723{
3724 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3725 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3726 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3727
3728 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3729 0);
3730 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3731
3732 return 0;
3733}
3734
3735subsys_initcall(neigh_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Generic address resolution entity
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * Fixes:
10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
11 * Harald Welte Add neighbour cache statistics like rtstat
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/slab.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/socket.h>
21#include <linux/netdevice.h>
22#include <linux/proc_fs.h>
23#ifdef CONFIG_SYSCTL
24#include <linux/sysctl.h>
25#endif
26#include <linux/times.h>
27#include <net/net_namespace.h>
28#include <net/neighbour.h>
29#include <net/arp.h>
30#include <net/dst.h>
31#include <net/sock.h>
32#include <net/netevent.h>
33#include <net/netlink.h>
34#include <linux/rtnetlink.h>
35#include <linux/random.h>
36#include <linux/string.h>
37#include <linux/log2.h>
38#include <linux/inetdevice.h>
39#include <net/addrconf.h>
40
41#include <trace/events/neigh.h>
42
43#define NEIGH_DEBUG 1
44#define neigh_dbg(level, fmt, ...) \
45do { \
46 if (level <= NEIGH_DEBUG) \
47 pr_debug(fmt, ##__VA_ARGS__); \
48} while (0)
49
50#define PNEIGH_HASHMASK 0xF
51
52static void neigh_timer_handler(struct timer_list *t);
53static void __neigh_notify(struct neighbour *n, int type, int flags,
54 u32 pid);
55static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
56static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
57 struct net_device *dev);
58
59#ifdef CONFIG_PROC_FS
60static const struct seq_operations neigh_stat_seq_ops;
61#endif
62
63static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family)
64{
65 int i;
66
67 switch (family) {
68 default:
69 DEBUG_NET_WARN_ON_ONCE(1);
70 fallthrough; /* to avoid panic by null-ptr-deref */
71 case AF_INET:
72 i = NEIGH_ARP_TABLE;
73 break;
74 case AF_INET6:
75 i = NEIGH_ND_TABLE;
76 break;
77 }
78
79 return &dev->neighbours[i];
80}
81
82/*
83 Neighbour hash table buckets are protected with rwlock tbl->lock.
84
85 - All the scans/updates to hash buckets MUST be made under this lock.
86 - NOTHING clever should be made under this lock: no callbacks
87 to protocol backends, no attempts to send something to network.
88 It will result in deadlocks, if backend/driver wants to use neighbour
89 cache.
90 - If the entry requires some non-trivial actions, increase
91 its reference count and release table lock.
92
93 Neighbour entries are protected:
94 - with reference count.
95 - with rwlock neigh->lock
96
97 Reference count prevents destruction.
98
99 neigh->lock mainly serializes ll address data and its validity state.
100 However, the same lock is used to protect another entry fields:
101 - timer
102 - resolution queue
103
104 Again, nothing clever shall be made under neigh->lock,
105 the most complicated procedure, which we allow is dev->hard_header.
106 It is supposed, that dev->hard_header is simplistic and does
107 not make callbacks to neighbour tables.
108 */
109
110static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
111{
112 kfree_skb(skb);
113 return -ENETDOWN;
114}
115
116static void neigh_cleanup_and_release(struct neighbour *neigh)
117{
118 trace_neigh_cleanup_and_release(neigh, 0);
119 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
120 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
121 neigh_release(neigh);
122}
123
124/*
125 * It is random distribution in the interval (1/2)*base...(3/2)*base.
126 * It corresponds to default IPv6 settings and is not overridable,
127 * because it is really reasonable choice.
128 */
129
130unsigned long neigh_rand_reach_time(unsigned long base)
131{
132 return base ? get_random_u32_below(base) + (base >> 1) : 0;
133}
134EXPORT_SYMBOL(neigh_rand_reach_time);
135
136static void neigh_mark_dead(struct neighbour *n)
137{
138 n->dead = 1;
139 if (!list_empty(&n->gc_list)) {
140 list_del_init(&n->gc_list);
141 atomic_dec(&n->tbl->gc_entries);
142 }
143 if (!list_empty(&n->managed_list))
144 list_del_init(&n->managed_list);
145}
146
147static void neigh_update_gc_list(struct neighbour *n)
148{
149 bool on_gc_list, exempt_from_gc;
150
151 write_lock_bh(&n->tbl->lock);
152 write_lock(&n->lock);
153 if (n->dead)
154 goto out;
155
156 /* remove from the gc list if new state is permanent or if neighbor
157 * is externally learned; otherwise entry should be on the gc list
158 */
159 exempt_from_gc = n->nud_state & NUD_PERMANENT ||
160 n->flags & NTF_EXT_LEARNED;
161 on_gc_list = !list_empty(&n->gc_list);
162
163 if (exempt_from_gc && on_gc_list) {
164 list_del_init(&n->gc_list);
165 atomic_dec(&n->tbl->gc_entries);
166 } else if (!exempt_from_gc && !on_gc_list) {
167 /* add entries to the tail; cleaning removes from the front */
168 list_add_tail(&n->gc_list, &n->tbl->gc_list);
169 atomic_inc(&n->tbl->gc_entries);
170 }
171out:
172 write_unlock(&n->lock);
173 write_unlock_bh(&n->tbl->lock);
174}
175
176static void neigh_update_managed_list(struct neighbour *n)
177{
178 bool on_managed_list, add_to_managed;
179
180 write_lock_bh(&n->tbl->lock);
181 write_lock(&n->lock);
182 if (n->dead)
183 goto out;
184
185 add_to_managed = n->flags & NTF_MANAGED;
186 on_managed_list = !list_empty(&n->managed_list);
187
188 if (!add_to_managed && on_managed_list)
189 list_del_init(&n->managed_list);
190 else if (add_to_managed && !on_managed_list)
191 list_add_tail(&n->managed_list, &n->tbl->managed_list);
192out:
193 write_unlock(&n->lock);
194 write_unlock_bh(&n->tbl->lock);
195}
196
197static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
198 bool *gc_update, bool *managed_update)
199{
200 u32 ndm_flags, old_flags = neigh->flags;
201
202 if (!(flags & NEIGH_UPDATE_F_ADMIN))
203 return;
204
205 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
206 ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
207
208 if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
209 if (ndm_flags & NTF_EXT_LEARNED)
210 neigh->flags |= NTF_EXT_LEARNED;
211 else
212 neigh->flags &= ~NTF_EXT_LEARNED;
213 *notify = 1;
214 *gc_update = true;
215 }
216 if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
217 if (ndm_flags & NTF_MANAGED)
218 neigh->flags |= NTF_MANAGED;
219 else
220 neigh->flags &= ~NTF_MANAGED;
221 *notify = 1;
222 *managed_update = true;
223 }
224}
225
226bool neigh_remove_one(struct neighbour *n)
227{
228 bool retval = false;
229
230 write_lock(&n->lock);
231 if (refcount_read(&n->refcnt) == 1) {
232 hlist_del_rcu(&n->hash);
233 hlist_del_rcu(&n->dev_list);
234 neigh_mark_dead(n);
235 retval = true;
236 }
237 write_unlock(&n->lock);
238 if (retval)
239 neigh_cleanup_and_release(n);
240 return retval;
241}
242
243static int neigh_forced_gc(struct neigh_table *tbl)
244{
245 int max_clean = atomic_read(&tbl->gc_entries) -
246 READ_ONCE(tbl->gc_thresh2);
247 u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
248 unsigned long tref = jiffies - 5 * HZ;
249 struct neighbour *n, *tmp;
250 int shrunk = 0;
251 int loop = 0;
252
253 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
254
255 write_lock_bh(&tbl->lock);
256
257 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
258 if (refcount_read(&n->refcnt) == 1) {
259 bool remove = false;
260
261 write_lock(&n->lock);
262 if ((n->nud_state == NUD_FAILED) ||
263 (n->nud_state == NUD_NOARP) ||
264 (tbl->is_multicast &&
265 tbl->is_multicast(n->primary_key)) ||
266 !time_in_range(n->updated, tref, jiffies))
267 remove = true;
268 write_unlock(&n->lock);
269
270 if (remove && neigh_remove_one(n))
271 shrunk++;
272 if (shrunk >= max_clean)
273 break;
274 if (++loop == 16) {
275 if (ktime_get_ns() > tmax)
276 goto unlock;
277 loop = 0;
278 }
279 }
280 }
281
282 WRITE_ONCE(tbl->last_flush, jiffies);
283unlock:
284 write_unlock_bh(&tbl->lock);
285
286 return shrunk;
287}
288
289static void neigh_add_timer(struct neighbour *n, unsigned long when)
290{
291 /* Use safe distance from the jiffies - LONG_MAX point while timer
292 * is running in DELAY/PROBE state but still show to user space
293 * large times in the past.
294 */
295 unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
296
297 neigh_hold(n);
298 if (!time_in_range(n->confirmed, mint, jiffies))
299 n->confirmed = mint;
300 if (time_before(n->used, n->confirmed))
301 n->used = n->confirmed;
302 if (unlikely(mod_timer(&n->timer, when))) {
303 printk("NEIGH: BUG, double timer add, state is %x\n",
304 n->nud_state);
305 dump_stack();
306 }
307}
308
309static int neigh_del_timer(struct neighbour *n)
310{
311 if ((n->nud_state & NUD_IN_TIMER) &&
312 del_timer(&n->timer)) {
313 neigh_release(n);
314 return 1;
315 }
316 return 0;
317}
318
319static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
320 int family)
321{
322 switch (family) {
323 case AF_INET:
324 return __in_dev_arp_parms_get_rcu(dev);
325 case AF_INET6:
326 return __in6_dev_nd_parms_get_rcu(dev);
327 }
328 return NULL;
329}
330
331static void neigh_parms_qlen_dec(struct net_device *dev, int family)
332{
333 struct neigh_parms *p;
334
335 rcu_read_lock();
336 p = neigh_get_dev_parms_rcu(dev, family);
337 if (p)
338 p->qlen--;
339 rcu_read_unlock();
340}
341
342static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
343 int family)
344{
345 struct sk_buff_head tmp;
346 unsigned long flags;
347 struct sk_buff *skb;
348
349 skb_queue_head_init(&tmp);
350 spin_lock_irqsave(&list->lock, flags);
351 skb = skb_peek(list);
352 while (skb != NULL) {
353 struct sk_buff *skb_next = skb_peek_next(skb, list);
354 struct net_device *dev = skb->dev;
355
356 if (net == NULL || net_eq(dev_net(dev), net)) {
357 neigh_parms_qlen_dec(dev, family);
358 __skb_unlink(skb, list);
359 __skb_queue_tail(&tmp, skb);
360 }
361 skb = skb_next;
362 }
363 spin_unlock_irqrestore(&list->lock, flags);
364
365 while ((skb = __skb_dequeue(&tmp))) {
366 dev_put(skb->dev);
367 kfree_skb(skb);
368 }
369}
370
371static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
372 bool skip_perm)
373{
374 struct hlist_head *dev_head;
375 struct hlist_node *tmp;
376 struct neighbour *n;
377
378 dev_head = neigh_get_dev_table(dev, tbl->family);
379
380 hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
381 if (skip_perm && n->nud_state & NUD_PERMANENT)
382 continue;
383
384 hlist_del_rcu(&n->hash);
385 hlist_del_rcu(&n->dev_list);
386 write_lock(&n->lock);
387 neigh_del_timer(n);
388 neigh_mark_dead(n);
389 if (refcount_read(&n->refcnt) != 1) {
390			/* The most unpleasant situation:
391			 * we must destroy the neighbour entry,
392			 * but someone still holds a reference to it.
393			 *
394			 * Destruction is delayed until the last
395			 * user releases the entry, but we must
396			 * kill the timers etc. and move it to a
397			 * safe state right away.
398			 */
399 __skb_queue_purge(&n->arp_queue);
400 n->arp_queue_len_bytes = 0;
401 WRITE_ONCE(n->output, neigh_blackhole);
402 if (n->nud_state & NUD_VALID)
403 n->nud_state = NUD_NOARP;
404 else
405 n->nud_state = NUD_NONE;
406 neigh_dbg(2, "neigh %p is stray\n", n);
407 }
408 write_unlock(&n->lock);
409 neigh_cleanup_and_release(n);
410 }
411}
412
413void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
414{
415 write_lock_bh(&tbl->lock);
416 neigh_flush_dev(tbl, dev, false);
417 write_unlock_bh(&tbl->lock);
418}
419EXPORT_SYMBOL(neigh_changeaddr);
420
421static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
422 bool skip_perm)
423{
424 write_lock_bh(&tbl->lock);
425 neigh_flush_dev(tbl, dev, skip_perm);
426 pneigh_ifdown_and_unlock(tbl, dev);
427 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
428 tbl->family);
429 if (skb_queue_empty_lockless(&tbl->proxy_queue))
430 del_timer_sync(&tbl->proxy_timer);
431 return 0;
432}
433
434int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
435{
436 __neigh_ifdown(tbl, dev, true);
437 return 0;
438}
439EXPORT_SYMBOL(neigh_carrier_down);
440
441int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
442{
443 __neigh_ifdown(tbl, dev, false);
444 return 0;
445}
446EXPORT_SYMBOL(neigh_ifdown);
447
448static struct neighbour *neigh_alloc(struct neigh_table *tbl,
449 struct net_device *dev,
450 u32 flags, bool exempt_from_gc)
451{
452 struct neighbour *n = NULL;
453 unsigned long now = jiffies;
454 int entries, gc_thresh3;
455
456 if (exempt_from_gc)
457 goto do_alloc;
458
459 entries = atomic_inc_return(&tbl->gc_entries) - 1;
460 gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
461 if (entries >= gc_thresh3 ||
462 (entries >= READ_ONCE(tbl->gc_thresh2) &&
463 time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
464 if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
465 net_info_ratelimited("%s: neighbor table overflow!\n",
466 tbl->id);
467 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
468 goto out_entries;
469 }
470 }
471
472do_alloc:
473 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
474 if (!n)
475 goto out_entries;
476
477 __skb_queue_head_init(&n->arp_queue);
478 rwlock_init(&n->lock);
479 seqlock_init(&n->ha_lock);
480 n->updated = n->used = now;
481 n->nud_state = NUD_NONE;
482 n->output = neigh_blackhole;
483 n->flags = flags;
484 seqlock_init(&n->hh.hh_lock);
485 n->parms = neigh_parms_clone(&tbl->parms);
486 timer_setup(&n->timer, neigh_timer_handler, 0);
487
488 NEIGH_CACHE_STAT_INC(tbl, allocs);
489 n->tbl = tbl;
490 refcount_set(&n->refcnt, 1);
491 n->dead = 1;
492 INIT_LIST_HEAD(&n->gc_list);
493 INIT_LIST_HEAD(&n->managed_list);
494
495 atomic_inc(&tbl->entries);
496out:
497 return n;
498
499out_entries:
500 if (!exempt_from_gc)
501 atomic_dec(&tbl->gc_entries);
502 goto out;
503}
504
505static void neigh_get_hash_rnd(u32 *x)
506{
507 *x = get_random_u32() | 1;
508}
509
510static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
511{
512 size_t size = (1 << shift) * sizeof(struct hlist_head);
513 struct hlist_head *hash_heads;
514 struct neigh_hash_table *ret;
515 int i;
516
517 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
518 if (!ret)
519 return NULL;
520
521 hash_heads = kvzalloc(size, GFP_ATOMIC);
522 if (!hash_heads) {
523 kfree(ret);
524 return NULL;
525 }
526 ret->hash_heads = hash_heads;
527 ret->hash_shift = shift;
528 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
529 neigh_get_hash_rnd(&ret->hash_rnd[i]);
530 return ret;
531}
532
533static void neigh_hash_free_rcu(struct rcu_head *head)
534{
535 struct neigh_hash_table *nht = container_of(head,
536 struct neigh_hash_table,
537 rcu);
538
539 kvfree(nht->hash_heads);
540 kfree(nht);
541}
542
543static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
544 unsigned long new_shift)
545{
546 unsigned int i, hash;
547 struct neigh_hash_table *new_nht, *old_nht;
548
549 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
550
551 old_nht = rcu_dereference_protected(tbl->nht,
552 lockdep_is_held(&tbl->lock));
553 new_nht = neigh_hash_alloc(new_shift);
554 if (!new_nht)
555 return old_nht;
556
557 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
558 struct hlist_node *tmp;
559 struct neighbour *n;
560
561 neigh_for_each_in_bucket_safe(n, tmp, &old_nht->hash_heads[i]) {
562 hash = tbl->hash(n->primary_key, n->dev,
563 new_nht->hash_rnd);
564
565 hash >>= (32 - new_nht->hash_shift);
566
567 hlist_del_rcu(&n->hash);
568 hlist_add_head_rcu(&n->hash, &new_nht->hash_heads[hash]);
569 }
570 }
571
572 rcu_assign_pointer(tbl->nht, new_nht);
573 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
574 return new_nht;
575}
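
/* The bucket index comes from the top hash bits: with hash_shift = 3 (the
 * initial 8-bucket table) a hash of 0xabcd0123 lands in bucket
 * 0xabcd0123 >> 29 = 5; after growing to hash_shift = 4 the same hash
 * consumes one more top bit and maps to bucket 0xa of 16.
 */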
576
577struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
578 struct net_device *dev)
579{
580 struct neighbour *n;
581
582 NEIGH_CACHE_STAT_INC(tbl, lookups);
583
584 rcu_read_lock();
585 n = __neigh_lookup_noref(tbl, pkey, dev);
586 if (n) {
587 if (!refcount_inc_not_zero(&n->refcnt))
588 n = NULL;
589 NEIGH_CACHE_STAT_INC(tbl, hits);
590 }
591
592 rcu_read_unlock();
593 return n;
594}
595EXPORT_SYMBOL(neigh_lookup);
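
/* A minimal usage sketch (illustrative only, hence #if 0; the IPv4 key
 * and table below are assumptions): neigh_lookup() returns a referenced
 * entry, so every successful lookup must be paired with neigh_release().
 */
#if 0
static bool gateway_is_reachable(struct net_device *dev, __be32 gw)
{
	struct neighbour *n = neigh_lookup(&arp_tbl, &gw, dev);
	bool reachable = false;

	if (n) {
		/* a careful caller would read nud_state under n->lock */
		reachable = n->nud_state & NUD_VALID;
		neigh_release(n);	/* drop the lookup's reference */
	}
	return reachable;
}
#endif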
596
597static struct neighbour *
598___neigh_create(struct neigh_table *tbl, const void *pkey,
599 struct net_device *dev, u32 flags,
600 bool exempt_from_gc, bool want_ref)
601{
602 u32 hash_val, key_len = tbl->key_len;
603 struct neighbour *n1, *rc, *n;
604 struct neigh_hash_table *nht;
605 int error;
606
607 n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
608 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
609 if (!n) {
610 rc = ERR_PTR(-ENOBUFS);
611 goto out;
612 }
613
614 memcpy(n->primary_key, pkey, key_len);
615 n->dev = dev;
616 netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
617
618 /* Protocol specific setup. */
619 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
620 rc = ERR_PTR(error);
621 goto out_neigh_release;
622 }
623
624 if (dev->netdev_ops->ndo_neigh_construct) {
625 error = dev->netdev_ops->ndo_neigh_construct(dev, n);
626 if (error < 0) {
627 rc = ERR_PTR(error);
628 goto out_neigh_release;
629 }
630 }
631
632 /* Device specific setup. */
633 if (n->parms->neigh_setup &&
634 (error = n->parms->neigh_setup(n)) < 0) {
635 rc = ERR_PTR(error);
636 goto out_neigh_release;
637 }
638
639 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
640
641 write_lock_bh(&tbl->lock);
642 nht = rcu_dereference_protected(tbl->nht,
643 lockdep_is_held(&tbl->lock));
644
645 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
646 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
647
648 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
649
650 if (n->parms->dead) {
651 rc = ERR_PTR(-EINVAL);
652 goto out_tbl_unlock;
653 }
654
655 neigh_for_each_in_bucket(n1, &nht->hash_heads[hash_val]) {
656 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
657 if (want_ref)
658 neigh_hold(n1);
659 rc = n1;
660 goto out_tbl_unlock;
661 }
662 }
663
664 n->dead = 0;
665 if (!exempt_from_gc)
666 list_add_tail(&n->gc_list, &n->tbl->gc_list);
667 if (n->flags & NTF_MANAGED)
668 list_add_tail(&n->managed_list, &n->tbl->managed_list);
669 if (want_ref)
670 neigh_hold(n);
671 hlist_add_head_rcu(&n->hash, &nht->hash_heads[hash_val]);
672
673 hlist_add_head_rcu(&n->dev_list,
674 neigh_get_dev_table(dev, tbl->family));
675
676 write_unlock_bh(&tbl->lock);
677 neigh_dbg(2, "neigh %p is created\n", n);
678 rc = n;
679out:
680 return rc;
681out_tbl_unlock:
682 write_unlock_bh(&tbl->lock);
683out_neigh_release:
684 if (!exempt_from_gc)
685 atomic_dec(&tbl->gc_entries);
686 neigh_release(n);
687 goto out;
688}
689
690struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
691 struct net_device *dev, bool want_ref)
692{
693 bool exempt_from_gc = !!(dev->flags & IFF_LOOPBACK);
694
695 return ___neigh_create(tbl, pkey, dev, 0, exempt_from_gc, want_ref);
696}
697EXPORT_SYMBOL(__neigh_create);
698
699static u32 pneigh_hash(const void *pkey, unsigned int key_len)
700{
701 u32 hash_val = *(u32 *)(pkey + key_len - 4);
702 hash_val ^= (hash_val >> 16);
703 hash_val ^= hash_val >> 8;
704 hash_val ^= hash_val >> 4;
705 hash_val &= PNEIGH_HASHMASK;
706 return hash_val;
707}
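
/* Worked example of the fold above, for hash_val = 0x12345678:
 *
 *	^= >> 16  ->  0x1234444c
 *	^= >>  8  ->  0x12267008
 *	^= >>  4  ->  0x13041708
 *
 * masking with PNEIGH_HASHMASK (0xF) then selects bucket 8 of the 16.
 */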
708
709static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
710 struct net *net,
711 const void *pkey,
712 unsigned int key_len,
713 struct net_device *dev)
714{
715 while (n) {
716 if (!memcmp(n->key, pkey, key_len) &&
717 net_eq(pneigh_net(n), net) &&
718 (n->dev == dev || !n->dev))
719 return n;
720 n = n->next;
721 }
722 return NULL;
723}
724
725struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
726 struct net *net, const void *pkey, struct net_device *dev)
727{
728 unsigned int key_len = tbl->key_len;
729 u32 hash_val = pneigh_hash(pkey, key_len);
730
731 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
732 net, pkey, key_len, dev);
733}
734EXPORT_SYMBOL_GPL(__pneigh_lookup);
735
736struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
737 struct net *net, const void *pkey,
738 struct net_device *dev, int creat)
739{
740 struct pneigh_entry *n;
741 unsigned int key_len = tbl->key_len;
742 u32 hash_val = pneigh_hash(pkey, key_len);
743
744 read_lock_bh(&tbl->lock);
745 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
746 net, pkey, key_len, dev);
747 read_unlock_bh(&tbl->lock);
748
749 if (n || !creat)
750 goto out;
751
752 ASSERT_RTNL();
753
754 n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
755 if (!n)
756 goto out;
757
758 write_pnet(&n->net, net);
759 memcpy(n->key, pkey, key_len);
760 n->dev = dev;
761 netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
762
763 if (tbl->pconstructor && tbl->pconstructor(n)) {
764 netdev_put(dev, &n->dev_tracker);
765 kfree(n);
766 n = NULL;
767 goto out;
768 }
769
770 write_lock_bh(&tbl->lock);
771 n->next = tbl->phash_buckets[hash_val];
772 tbl->phash_buckets[hash_val] = n;
773 write_unlock_bh(&tbl->lock);
774out:
775 return n;
776}
777EXPORT_SYMBOL(pneigh_lookup);
778
779
780int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
781 struct net_device *dev)
782{
783 struct pneigh_entry *n, **np;
784 unsigned int key_len = tbl->key_len;
785 u32 hash_val = pneigh_hash(pkey, key_len);
786
787 write_lock_bh(&tbl->lock);
788 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
789 np = &n->next) {
790 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
791 net_eq(pneigh_net(n), net)) {
792 *np = n->next;
793 write_unlock_bh(&tbl->lock);
794 if (tbl->pdestructor)
795 tbl->pdestructor(n);
796 netdev_put(n->dev, &n->dev_tracker);
797 kfree(n);
798 return 0;
799 }
800 }
801 write_unlock_bh(&tbl->lock);
802 return -ENOENT;
803}
804
805static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
806 struct net_device *dev)
807{
808 struct pneigh_entry *n, **np, *freelist = NULL;
809 u32 h;
810
811 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
812 np = &tbl->phash_buckets[h];
813 while ((n = *np) != NULL) {
814 if (!dev || n->dev == dev) {
815 *np = n->next;
816 n->next = freelist;
817 freelist = n;
818 continue;
819 }
820 np = &n->next;
821 }
822 }
823 write_unlock_bh(&tbl->lock);
824 while ((n = freelist)) {
825 freelist = n->next;
826 n->next = NULL;
827 if (tbl->pdestructor)
828 tbl->pdestructor(n);
829 netdev_put(n->dev, &n->dev_tracker);
830 kfree(n);
831 }
832 return -ENOENT;
833}
834
835static void neigh_parms_destroy(struct neigh_parms *parms);
836
837static inline void neigh_parms_put(struct neigh_parms *parms)
838{
839 if (refcount_dec_and_test(&parms->refcnt))
840 neigh_parms_destroy(parms);
841}
842
843/*
844 * The neighbour must already be removed
845 * from the table.
846 */
847void neigh_destroy(struct neighbour *neigh)
848{
849 struct net_device *dev = neigh->dev;
850
851 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
852
853 if (!neigh->dead) {
854 pr_warn("Destroying alive neighbour %p\n", neigh);
855 dump_stack();
856 return;
857 }
858
859 if (neigh_del_timer(neigh))
860 pr_warn("Impossible event\n");
861
862 write_lock_bh(&neigh->lock);
863 __skb_queue_purge(&neigh->arp_queue);
864 write_unlock_bh(&neigh->lock);
865 neigh->arp_queue_len_bytes = 0;
866
867 if (dev->netdev_ops->ndo_neigh_destroy)
868 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
869
870 netdev_put(dev, &neigh->dev_tracker);
871 neigh_parms_put(neigh->parms);
872
873 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
874
875 atomic_dec(&neigh->tbl->entries);
876 kfree_rcu(neigh, rcu);
877}
878EXPORT_SYMBOL(neigh_destroy);
879
880/* Neighbour state is suspicious;
881   disable the fast path.
882
883   Called with neigh->lock write-locked.
884 */
885static void neigh_suspect(struct neighbour *neigh)
886{
887 neigh_dbg(2, "neigh %p is suspected\n", neigh);
888
889 WRITE_ONCE(neigh->output, neigh->ops->output);
890}
891
892/* Neighbour state is OK;
893   enable the fast path.
894
895   Called with neigh->lock write-locked.
896 */
897static void neigh_connect(struct neighbour *neigh)
898{
899 neigh_dbg(2, "neigh %p is connected\n", neigh);
900
901 WRITE_ONCE(neigh->output, neigh->ops->connected_output);
902}
903
904static void neigh_periodic_work(struct work_struct *work)
905{
906 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
907 struct neigh_hash_table *nht;
908 struct hlist_node *tmp;
909 struct neighbour *n;
910 unsigned int i;
911
912 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
913
914 write_lock_bh(&tbl->lock);
915 nht = rcu_dereference_protected(tbl->nht,
916 lockdep_is_held(&tbl->lock));
917
918	/*
919	 * Periodically recompute ReachableTime from the random function.
920	 */
921
922 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
923 struct neigh_parms *p;
924
925 WRITE_ONCE(tbl->last_rand, jiffies);
926 list_for_each_entry(p, &tbl->parms_list, list)
927 p->reachable_time =
928 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
929 }
930
931 if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
932 goto out;
933
934 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
935 neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i]) {
936 unsigned int state;
937
938 write_lock(&n->lock);
939
940 state = n->nud_state;
941 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
942 (n->flags & NTF_EXT_LEARNED)) {
943 write_unlock(&n->lock);
944 continue;
945 }
946
947 if (time_before(n->used, n->confirmed) &&
948 time_is_before_eq_jiffies(n->confirmed))
949 n->used = n->confirmed;
950
951 if (refcount_read(&n->refcnt) == 1 &&
952 (state == NUD_FAILED ||
953 !time_in_range_open(jiffies, n->used,
954 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
955 hlist_del_rcu(&n->hash);
956 hlist_del_rcu(&n->dev_list);
957 neigh_mark_dead(n);
958 write_unlock(&n->lock);
959 neigh_cleanup_and_release(n);
960 continue;
961 }
962 write_unlock(&n->lock);
963 }
964		/*
965		 * It's fine to release the lock here, even if the hash
966		 * table grows while we are preempted.
967		 */
968 write_unlock_bh(&tbl->lock);
969 cond_resched();
970 write_lock_bh(&tbl->lock);
971 nht = rcu_dereference_protected(tbl->nht,
972 lockdep_is_held(&tbl->lock));
973 }
974out:
975 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
976 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
977 * BASE_REACHABLE_TIME.
978 */
979 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
980 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
981 write_unlock_bh(&tbl->lock);
982}
983
984static __inline__ int neigh_max_probes(struct neighbour *n)
985{
986 struct neigh_parms *p = n->parms;
987 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
988 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
989 NEIGH_VAR(p, MCAST_PROBES));
990}
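
/* Example probe budget, assuming the common IPv4 defaults of
 * ucast_probes = 3, app_probes = 0, mcast_probes = 3, mcast_reprobes = 0:
 * an entry resolving in NUD_INCOMPLETE may send up to 3 + 0 + 3 = 6
 * probes, while one being re-verified in NUD_PROBE gets 3 + 0 + 0 = 3
 * before neigh_timer_handler() fails it.
 */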
991
992static void neigh_invalidate(struct neighbour *neigh)
993 __releases(neigh->lock)
994 __acquires(neigh->lock)
995{
996 struct sk_buff *skb;
997
998 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
999 neigh_dbg(2, "neigh %p is failed\n", neigh);
1000 neigh->updated = jiffies;
1001
1002	/* This is a delicate spot. error_report() is a complicated
1003	   routine and may even touch the same neighbour entry!
1004
1005	   So be careful here to avoid an endless loop. --ANK
1006	 */
1007 while (neigh->nud_state == NUD_FAILED &&
1008 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1009 write_unlock(&neigh->lock);
1010 neigh->ops->error_report(neigh, skb);
1011 write_lock(&neigh->lock);
1012 }
1013 __skb_queue_purge(&neigh->arp_queue);
1014 neigh->arp_queue_len_bytes = 0;
1015}
1016
1017static void neigh_probe(struct neighbour *neigh)
1018 __releases(neigh->lock)
1019{
1020 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1021 /* keep skb alive even if arp_queue overflows */
1022 if (skb)
1023 skb = skb_clone(skb, GFP_ATOMIC);
1024 write_unlock(&neigh->lock);
1025 if (neigh->ops->solicit)
1026 neigh->ops->solicit(neigh, skb);
1027 atomic_inc(&neigh->probes);
1028 consume_skb(skb);
1029}
1030
1031/* Called when a timer expires for a neighbour entry. */
1032
1033static void neigh_timer_handler(struct timer_list *t)
1034{
1035 unsigned long now, next;
1036 struct neighbour *neigh = from_timer(neigh, t, timer);
1037 unsigned int state;
1038 int notify = 0;
1039
1040 write_lock(&neigh->lock);
1041
1042 state = neigh->nud_state;
1043 now = jiffies;
1044 next = now + HZ;
1045
1046 if (!(state & NUD_IN_TIMER))
1047 goto out;
1048
1049 if (state & NUD_REACHABLE) {
1050 if (time_before_eq(now,
1051 neigh->confirmed + neigh->parms->reachable_time)) {
1052 neigh_dbg(2, "neigh %p is still alive\n", neigh);
1053 next = neigh->confirmed + neigh->parms->reachable_time;
1054 } else if (time_before_eq(now,
1055 neigh->used +
1056 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1057 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1058 WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1059 neigh->updated = jiffies;
1060 neigh_suspect(neigh);
1061 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1062 } else {
1063 neigh_dbg(2, "neigh %p is suspected\n", neigh);
1064 WRITE_ONCE(neigh->nud_state, NUD_STALE);
1065 neigh->updated = jiffies;
1066 neigh_suspect(neigh);
1067 notify = 1;
1068 }
1069 } else if (state & NUD_DELAY) {
1070 if (time_before_eq(now,
1071 neigh->confirmed +
1072 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1073 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1074 WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1075 neigh->updated = jiffies;
1076 neigh_connect(neigh);
1077 notify = 1;
1078 next = neigh->confirmed + neigh->parms->reachable_time;
1079 } else {
1080 neigh_dbg(2, "neigh %p is probed\n", neigh);
1081 WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1082 neigh->updated = jiffies;
1083 atomic_set(&neigh->probes, 0);
1084 notify = 1;
1085 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1086 HZ/100);
1087 }
1088 } else {
1089 /* NUD_PROBE|NUD_INCOMPLETE */
1090 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1091 }
1092
1093 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1094 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1095 WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1096 notify = 1;
1097 neigh_invalidate(neigh);
1098 goto out;
1099 }
1100
1101 if (neigh->nud_state & NUD_IN_TIMER) {
1102 if (time_before(next, jiffies + HZ/100))
1103 next = jiffies + HZ/100;
1104 if (!mod_timer(&neigh->timer, next))
1105 neigh_hold(neigh);
1106 }
1107 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1108 neigh_probe(neigh);
1109 } else {
1110out:
1111 write_unlock(&neigh->lock);
1112 }
1113
1114 if (notify)
1115 neigh_update_notify(neigh, 0);
1116
1117 trace_neigh_timer_handler(neigh, 0);
1118
1119 neigh_release(neigh);
1120}
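
/* A rough map of the transitions driven above and by __neigh_event_send()
 * below:
 *
 *	(no entry) --traffic--> INCOMPLETE --probes exhausted--> FAILED
 *	REACHABLE --reachable_time expired--> STALE
 *	STALE --traffic--> DELAY --confirmed in time--> REACHABLE
 *	DELAY --delay_probe_time expired--> PROBE --probes exhausted--> FAILED
 */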
1121
1122int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1123 const bool immediate_ok)
1124{
1125 int rc;
1126 bool immediate_probe = false;
1127
1128 write_lock_bh(&neigh->lock);
1129
1130 rc = 0;
1131 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1132 goto out_unlock_bh;
1133 if (neigh->dead)
1134 goto out_dead;
1135
1136 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1137 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1138 NEIGH_VAR(neigh->parms, APP_PROBES)) {
1139 unsigned long next, now = jiffies;
1140
1141 atomic_set(&neigh->probes,
1142 NEIGH_VAR(neigh->parms, UCAST_PROBES));
1143 neigh_del_timer(neigh);
1144 WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1145 neigh->updated = now;
1146 if (!immediate_ok) {
1147 next = now + 1;
1148 } else {
1149 immediate_probe = true;
1150 next = now + max(NEIGH_VAR(neigh->parms,
1151 RETRANS_TIME),
1152 HZ / 100);
1153 }
1154 neigh_add_timer(neigh, next);
1155 } else {
1156 WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1157 neigh->updated = jiffies;
1158 write_unlock_bh(&neigh->lock);
1159
1160 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1161 return 1;
1162 }
1163 } else if (neigh->nud_state & NUD_STALE) {
1164 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1165 neigh_del_timer(neigh);
1166 WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1167 neigh->updated = jiffies;
1168 neigh_add_timer(neigh, jiffies +
1169 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1170 }
1171
1172 if (neigh->nud_state == NUD_INCOMPLETE) {
1173 if (skb) {
1174 while (neigh->arp_queue_len_bytes + skb->truesize >
1175 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1176 struct sk_buff *buff;
1177
1178 buff = __skb_dequeue(&neigh->arp_queue);
1179 if (!buff)
1180 break;
1181 neigh->arp_queue_len_bytes -= buff->truesize;
1182 kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1183 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1184 }
1185 skb_dst_force(skb);
1186 __skb_queue_tail(&neigh->arp_queue, skb);
1187 neigh->arp_queue_len_bytes += skb->truesize;
1188 }
1189 rc = 1;
1190 }
1191out_unlock_bh:
1192 if (immediate_probe)
1193 neigh_probe(neigh);
1194 else
1195 write_unlock(&neigh->lock);
1196 local_bh_enable();
1197 trace_neigh_event_send_done(neigh, rc);
1198 return rc;
1199
1200out_dead:
1201 if (neigh->nud_state & NUD_STALE)
1202 goto out_unlock_bh;
1203 write_unlock_bh(&neigh->lock);
1204 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1205 trace_neigh_event_send_dead(neigh, 1);
1206 return 1;
1207}
1208EXPORT_SYMBOL(__neigh_event_send);
1209
1210static void neigh_update_hhs(struct neighbour *neigh)
1211{
1212 struct hh_cache *hh;
1213 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1214 = NULL;
1215
1216 if (neigh->dev->header_ops)
1217 update = neigh->dev->header_ops->cache_update;
1218
1219 if (update) {
1220 hh = &neigh->hh;
1221 if (READ_ONCE(hh->hh_len)) {
1222 write_seqlock_bh(&hh->hh_lock);
1223 update(hh, neigh->dev, neigh->ha);
1224 write_sequnlock_bh(&hh->hh_lock);
1225 }
1226 }
1227}
1228
1229/* Generic update routine.
1230   -- lladdr is the new lladdr, or NULL if it is not supplied.
1231   -- new    is the new state.
1232   -- flags
1233	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1234	if it differs.
1235	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1236	lladdr instead of overriding it
1237	if it differs.
1238	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1239	NEIGH_UPDATE_F_USE means that the entry is user triggered.
1240	NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed.
1241	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1242	NTF_ROUTER flag.
1243	NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
1244	as a router.
1245
1246   The caller MUST hold a reference count on the entry.
1247 */
1248static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1249 u8 new, u32 flags, u32 nlmsg_pid,
1250 struct netlink_ext_ack *extack)
1251{
1252 bool gc_update = false, managed_update = false;
1253 int update_isrouter = 0;
1254 struct net_device *dev;
1255 int err, notify = 0;
1256 u8 old;
1257
1258 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1259
1260 write_lock_bh(&neigh->lock);
1261
1262 dev = neigh->dev;
1263 old = neigh->nud_state;
1264 err = -EPERM;
1265
1266 if (neigh->dead) {
1267 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1268 new = old;
1269 goto out;
1270 }
1271 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1272 (old & (NUD_NOARP | NUD_PERMANENT)))
1273 goto out;
1274
1275	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1276 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1277 new = old & ~NUD_PERMANENT;
1278 WRITE_ONCE(neigh->nud_state, new);
1279 err = 0;
1280 goto out;
1281 }
1282
1283 if (!(new & NUD_VALID)) {
1284 neigh_del_timer(neigh);
1285 if (old & NUD_CONNECTED)
1286 neigh_suspect(neigh);
1287 WRITE_ONCE(neigh->nud_state, new);
1288 err = 0;
1289 notify = old & NUD_VALID;
1290 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1291 (new & NUD_FAILED)) {
1292 neigh_invalidate(neigh);
1293 notify = 1;
1294 }
1295 goto out;
1296 }
1297
1298 /* Compare new lladdr with cached one */
1299 if (!dev->addr_len) {
1300 /* First case: device needs no address. */
1301 lladdr = neigh->ha;
1302 } else if (lladdr) {
1303 /* The second case: if something is already cached
1304 and a new address is proposed:
1305 - compare new & old
1306 - if they are different, check override flag
1307 */
1308 if ((old & NUD_VALID) &&
1309 !memcmp(lladdr, neigh->ha, dev->addr_len))
1310 lladdr = neigh->ha;
1311 } else {
1312 /* No address is supplied; if we know something,
1313 use it, otherwise discard the request.
1314 */
1315 err = -EINVAL;
1316 if (!(old & NUD_VALID)) {
1317 NL_SET_ERR_MSG(extack, "No link layer address given");
1318 goto out;
1319 }
1320 lladdr = neigh->ha;
1321 }
1322
1323	/* Update the confirmed timestamp after we receive an ARP packet,
1324	 * even if it doesn't change the IP-to-MAC binding.
1325	 */
1326 if (new & NUD_CONNECTED)
1327 neigh->confirmed = jiffies;
1328
1329	/* If the entry was valid and the address is unchanged,
1330	   do not change the entry state if the new one is STALE.
1331	 */
1332 err = 0;
1333 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1334 if (old & NUD_VALID) {
1335 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1336 update_isrouter = 0;
1337 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1338 (old & NUD_CONNECTED)) {
1339 lladdr = neigh->ha;
1340 new = NUD_STALE;
1341 } else
1342 goto out;
1343 } else {
1344 if (lladdr == neigh->ha && new == NUD_STALE &&
1345 !(flags & NEIGH_UPDATE_F_ADMIN))
1346 new = old;
1347 }
1348 }
1349
1350	/* Update the timestamp only once we know we will make a change to
1351	 * the neighbour entry. Otherwise we risk moving the locktime window
1352	 * with no-op updates and ignoring relevant ARP updates.
1353	 */
1354 if (new != old || lladdr != neigh->ha)
1355 neigh->updated = jiffies;
1356
1357 if (new != old) {
1358 neigh_del_timer(neigh);
1359 if (new & NUD_PROBE)
1360 atomic_set(&neigh->probes, 0);
1361 if (new & NUD_IN_TIMER)
1362 neigh_add_timer(neigh, (jiffies +
1363 ((new & NUD_REACHABLE) ?
1364 neigh->parms->reachable_time :
1365 0)));
1366 WRITE_ONCE(neigh->nud_state, new);
1367 notify = 1;
1368 }
1369
1370 if (lladdr != neigh->ha) {
1371 write_seqlock(&neigh->ha_lock);
1372 memcpy(&neigh->ha, lladdr, dev->addr_len);
1373 write_sequnlock(&neigh->ha_lock);
1374 neigh_update_hhs(neigh);
1375 if (!(new & NUD_CONNECTED))
1376 neigh->confirmed = jiffies -
1377 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1378 notify = 1;
1379 }
1380 if (new == old)
1381 goto out;
1382 if (new & NUD_CONNECTED)
1383 neigh_connect(neigh);
1384 else
1385 neigh_suspect(neigh);
1386 if (!(old & NUD_VALID)) {
1387 struct sk_buff *skb;
1388
1389		/* Again: avoid an endless loop if something goes wrong */
1390
1391 while (neigh->nud_state & NUD_VALID &&
1392 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1393 struct dst_entry *dst = skb_dst(skb);
1394 struct neighbour *n2, *n1 = neigh;
1395 write_unlock_bh(&neigh->lock);
1396
1397 rcu_read_lock();
1398
1399			/* Why not just use 'neigh' as-is? The problem is that
1400			 * things such as shaper, eql, and sch_teql can end up
1401			 * using different neigh objects to output the packet in
1402			 * the output path. So what we need to do here is
1403			 * re-look up the top-level neigh in the path so we can
1404			 * reinject the packet there.
1405			 */
1406 n2 = NULL;
1407 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1408 n2 = dst_neigh_lookup_skb(dst, skb);
1409 if (n2)
1410 n1 = n2;
1411 }
1412 READ_ONCE(n1->output)(n1, skb);
1413 if (n2)
1414 neigh_release(n2);
1415 rcu_read_unlock();
1416
1417 write_lock_bh(&neigh->lock);
1418 }
1419 __skb_queue_purge(&neigh->arp_queue);
1420 neigh->arp_queue_len_bytes = 0;
1421 }
1422out:
1423 if (update_isrouter)
1424 neigh_update_is_router(neigh, flags, ¬ify);
1425 write_unlock_bh(&neigh->lock);
1426 if (((new ^ old) & NUD_PERMANENT) || gc_update)
1427 neigh_update_gc_list(neigh);
1428 if (managed_update)
1429 neigh_update_managed_list(neigh);
1430 if (notify)
1431 neigh_update_notify(neigh, nlmsg_pid);
1432 trace_neigh_update_done(neigh, err);
1433 return err;
1434}
1435
1436int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1437 u32 flags, u32 nlmsg_pid)
1438{
1439 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1440}
1441EXPORT_SYMBOL(neigh_update);
1442
1443/* Update the neigh to listen temporarily for probe responses, even if it is
1444 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1445 */
1446void __neigh_set_probe_once(struct neighbour *neigh)
1447{
1448 if (neigh->dead)
1449 return;
1450 neigh->updated = jiffies;
1451 if (!(neigh->nud_state & NUD_FAILED))
1452 return;
1453 WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1454 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1455 neigh_add_timer(neigh,
1456 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1457 HZ/100));
1458}
1459EXPORT_SYMBOL(__neigh_set_probe_once);
1460
1461struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1462 u8 *lladdr, void *saddr,
1463 struct net_device *dev)
1464{
1465 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1466 lladdr || !dev->addr_len);
1467 if (neigh)
1468 neigh_update(neigh, lladdr, NUD_STALE,
1469 NEIGH_UPDATE_F_OVERRIDE, 0);
1470 return neigh;
1471}
1472EXPORT_SYMBOL(neigh_event_ns);
1473
1474/* called with read_lock_bh(&n->lock); */
1475static void neigh_hh_init(struct neighbour *n)
1476{
1477 struct net_device *dev = n->dev;
1478 __be16 prot = n->tbl->protocol;
1479 struct hh_cache *hh = &n->hh;
1480
1481 write_lock_bh(&n->lock);
1482
1483 /* Only one thread can come in here and initialize the
1484 * hh_cache entry.
1485 */
1486 if (!hh->hh_len)
1487 dev->header_ops->cache(n, hh, prot);
1488
1489 write_unlock_bh(&n->lock);
1490}
1491
1492/* Slow and careful. */
1493
1494int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1495{
1496 int rc = 0;
1497
1498 if (!neigh_event_send(neigh, skb)) {
1499 int err;
1500 struct net_device *dev = neigh->dev;
1501 unsigned int seq;
1502
1503 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1504 neigh_hh_init(neigh);
1505
1506 do {
1507 __skb_pull(skb, skb_network_offset(skb));
1508 seq = read_seqbegin(&neigh->ha_lock);
1509 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1510 neigh->ha, NULL, skb->len);
1511 } while (read_seqretry(&neigh->ha_lock, seq));
1512
1513 if (err >= 0)
1514 rc = dev_queue_xmit(skb);
1515 else
1516 goto out_kfree_skb;
1517 }
1518out:
1519 return rc;
1520out_kfree_skb:
1521 rc = -EINVAL;
1522 kfree_skb(skb);
1523 goto out;
1524}
1525EXPORT_SYMBOL(neigh_resolve_output);
1526
1527/* As fast as possible without hh cache */
1528
1529int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1530{
1531 struct net_device *dev = neigh->dev;
1532 unsigned int seq;
1533 int err;
1534
1535 do {
1536 __skb_pull(skb, skb_network_offset(skb));
1537 seq = read_seqbegin(&neigh->ha_lock);
1538 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1539 neigh->ha, NULL, skb->len);
1540 } while (read_seqretry(&neigh->ha_lock, seq));
1541
1542 if (err >= 0)
1543 err = dev_queue_xmit(skb);
1544 else {
1545 err = -EINVAL;
1546 kfree_skb(skb);
1547 }
1548 return err;
1549}
1550EXPORT_SYMBOL(neigh_connected_output);
1551
1552int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1553{
1554 return dev_queue_xmit(skb);
1555}
1556EXPORT_SYMBOL(neigh_direct_output);
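
/* These three helpers are what protocols plug into their neigh_ops, and
 * neigh_connect()/neigh_suspect() above flip n->output between them. A
 * rough sketch, loosely modelled on the ARP ops tables (the field values
 * here are assumptions, not a verbatim copy):
 */
#if 0
static const struct neigh_ops example_ops = {
	.family			= AF_INET,
	.solicit		= arp_solicit,		/* send a probe */
	.error_report		= arp_error_report,	/* resolution failed */
	.output			= neigh_resolve_output,	/* slow, careful path */
	.connected_output	= neigh_connected_output, /* fast path */
};
#endif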
1557
1558static void neigh_managed_work(struct work_struct *work)
1559{
1560 struct neigh_table *tbl = container_of(work, struct neigh_table,
1561 managed_work.work);
1562 struct neighbour *neigh;
1563
1564 write_lock_bh(&tbl->lock);
1565 list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1566 neigh_event_send_probe(neigh, NULL, false);
1567 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1568 NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1569 write_unlock_bh(&tbl->lock);
1570}
1571
1572static void neigh_proxy_process(struct timer_list *t)
1573{
1574 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1575 long sched_next = 0;
1576 unsigned long now = jiffies;
1577 struct sk_buff *skb, *n;
1578
1579 spin_lock(&tbl->proxy_queue.lock);
1580
1581 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1582 long tdif = NEIGH_CB(skb)->sched_next - now;
1583
1584 if (tdif <= 0) {
1585 struct net_device *dev = skb->dev;
1586
1587 neigh_parms_qlen_dec(dev, tbl->family);
1588 __skb_unlink(skb, &tbl->proxy_queue);
1589
1590 if (tbl->proxy_redo && netif_running(dev)) {
1591 rcu_read_lock();
1592 tbl->proxy_redo(skb);
1593 rcu_read_unlock();
1594 } else {
1595 kfree_skb(skb);
1596 }
1597
1598 dev_put(dev);
1599 } else if (!sched_next || tdif < sched_next)
1600 sched_next = tdif;
1601 }
1602 del_timer(&tbl->proxy_timer);
1603 if (sched_next)
1604 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1605 spin_unlock(&tbl->proxy_queue.lock);
1606}
1607
1608static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1609{
1610 /* If proxy_delay is zero, do not call get_random_u32_below()
1611 * as it is undefined behavior.
1612 */
1613 unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1614
1615 return proxy_delay ?
1616 jiffies + get_random_u32_below(proxy_delay) : jiffies;
1617}
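
/* E.g. with the usual ARP default of proxy_delay = (8 * HZ) / 10 (an
 * assumption; see the per-protocol tables), a proxied request is answered
 * after a random 0-800 ms pause so the proxy does not always beat the
 * real owner of the address onto the wire; proxy_delay = 0 answers
 * immediately and skips the RNG entirely.
 */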
1618
1619void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1620 struct sk_buff *skb)
1621{
1622 unsigned long sched_next = neigh_proxy_delay(p);
1623
1624 if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1625 kfree_skb(skb);
1626 return;
1627 }
1628
1629 NEIGH_CB(skb)->sched_next = sched_next;
1630 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1631
1632 spin_lock(&tbl->proxy_queue.lock);
1633 if (del_timer(&tbl->proxy_timer)) {
1634 if (time_before(tbl->proxy_timer.expires, sched_next))
1635 sched_next = tbl->proxy_timer.expires;
1636 }
1637 skb_dst_drop(skb);
1638 dev_hold(skb->dev);
1639 __skb_queue_tail(&tbl->proxy_queue, skb);
1640 p->qlen++;
1641 mod_timer(&tbl->proxy_timer, sched_next);
1642 spin_unlock(&tbl->proxy_queue.lock);
1643}
1644EXPORT_SYMBOL(pneigh_enqueue);
1645
1646static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1647 struct net *net, int ifindex)
1648{
1649 struct neigh_parms *p;
1650
1651 list_for_each_entry(p, &tbl->parms_list, list) {
1652 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1653 (!p->dev && !ifindex && net_eq(net, &init_net)))
1654 return p;
1655 }
1656
1657 return NULL;
1658}
1659
1660struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1661 struct neigh_table *tbl)
1662{
1663 struct neigh_parms *p;
1664 struct net *net = dev_net(dev);
1665 const struct net_device_ops *ops = dev->netdev_ops;
1666
1667 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1668 if (p) {
1669 p->tbl = tbl;
1670 refcount_set(&p->refcnt, 1);
1671 p->reachable_time =
1672 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1673 p->qlen = 0;
1674 netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1675 p->dev = dev;
1676 write_pnet(&p->net, net);
1677 p->sysctl_table = NULL;
1678
1679 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1680 netdev_put(dev, &p->dev_tracker);
1681 kfree(p);
1682 return NULL;
1683 }
1684
1685 write_lock_bh(&tbl->lock);
1686 list_add(&p->list, &tbl->parms.list);
1687 write_unlock_bh(&tbl->lock);
1688
1689 neigh_parms_data_state_cleanall(p);
1690 }
1691 return p;
1692}
1693EXPORT_SYMBOL(neigh_parms_alloc);
1694
1695static void neigh_rcu_free_parms(struct rcu_head *head)
1696{
1697 struct neigh_parms *parms =
1698 container_of(head, struct neigh_parms, rcu_head);
1699
1700 neigh_parms_put(parms);
1701}
1702
1703void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1704{
1705 if (!parms || parms == &tbl->parms)
1706 return;
1707 write_lock_bh(&tbl->lock);
1708 list_del(&parms->list);
1709 parms->dead = 1;
1710 write_unlock_bh(&tbl->lock);
1711 netdev_put(parms->dev, &parms->dev_tracker);
1712 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1713}
1714EXPORT_SYMBOL(neigh_parms_release);
1715
1716static void neigh_parms_destroy(struct neigh_parms *parms)
1717{
1718 kfree(parms);
1719}
1720
1721static struct lock_class_key neigh_table_proxy_queue_class;
1722
1723static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724
1725void neigh_table_init(int index, struct neigh_table *tbl)
1726{
1727 unsigned long now = jiffies;
1728 unsigned long phsize;
1729
1730 INIT_LIST_HEAD(&tbl->parms_list);
1731 INIT_LIST_HEAD(&tbl->gc_list);
1732 INIT_LIST_HEAD(&tbl->managed_list);
1733
1734 list_add(&tbl->parms.list, &tbl->parms_list);
1735 write_pnet(&tbl->parms.net, &init_net);
1736 refcount_set(&tbl->parms.refcnt, 1);
1737 tbl->parms.reachable_time =
1738 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1739 tbl->parms.qlen = 0;
1740
1741 tbl->stats = alloc_percpu(struct neigh_statistics);
1742 if (!tbl->stats)
1743 panic("cannot create neighbour cache statistics");
1744
1745#ifdef CONFIG_PROC_FS
1746 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1747 &neigh_stat_seq_ops, tbl))
1748 panic("cannot create neighbour proc dir entry");
1749#endif
1750
1751 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1752
1753 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1754 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1755
1756 if (!tbl->nht || !tbl->phash_buckets)
1757 panic("cannot allocate neighbour cache hashes");
1758
1759 if (!tbl->entry_size)
1760 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1761 tbl->key_len, NEIGH_PRIV_ALIGN);
1762 else
1763 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1764
1765 rwlock_init(&tbl->lock);
1766
1767 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1768 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1769 tbl->parms.reachable_time);
1770 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1771 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1772
1773 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1774 skb_queue_head_init_class(&tbl->proxy_queue,
1775 &neigh_table_proxy_queue_class);
1776
1777 tbl->last_flush = now;
1778 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1779
1780 rcu_assign_pointer(neigh_tables[index], tbl);
1781}
1782EXPORT_SYMBOL(neigh_table_init);
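
/* Each protocol calls neigh_table_init() once at boot with its slot and
 * table; as a sketch of the IPv4 side (the real call lives in
 * net/ipv4/arp.c, shown here only for orientation):
 */
#if 0
void __init arp_init(void)
{
	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
	/* ... followed by proc, sysctl and packet-type registration ... */
}
#endif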
1783
1784/*
1785 * Only called from ndisc_cleanup(), which means this is dead code
1786 * because the IPv6 module can no longer be unloaded.
1787 */
1788int neigh_table_clear(int index, struct neigh_table *tbl)
1789{
1790 RCU_INIT_POINTER(neigh_tables[index], NULL);
1791 synchronize_rcu();
1792
1793	/* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1794 cancel_delayed_work_sync(&tbl->managed_work);
1795 cancel_delayed_work_sync(&tbl->gc_work);
1796 del_timer_sync(&tbl->proxy_timer);
1797 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1798 neigh_ifdown(tbl, NULL);
1799 if (atomic_read(&tbl->entries))
1800 pr_crit("neighbour leakage\n");
1801
1802 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1803 neigh_hash_free_rcu);
1804 tbl->nht = NULL;
1805
1806 kfree(tbl->phash_buckets);
1807 tbl->phash_buckets = NULL;
1808
1809 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1810
1811 free_percpu(tbl->stats);
1812 tbl->stats = NULL;
1813
1814 return 0;
1815}
1816EXPORT_SYMBOL(neigh_table_clear);
1817
1818static struct neigh_table *neigh_find_table(int family)
1819{
1820 struct neigh_table *tbl = NULL;
1821
1822 switch (family) {
1823 case AF_INET:
1824 tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ARP_TABLE]);
1825 break;
1826 case AF_INET6:
1827 tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ND_TABLE]);
1828 break;
1829 }
1830
1831 return tbl;
1832}
1833
1834const struct nla_policy nda_policy[NDA_MAX+1] = {
1835 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1836 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1837 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1838 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1839 [NDA_PROBES] = { .type = NLA_U32 },
1840 [NDA_VLAN] = { .type = NLA_U16 },
1841 [NDA_PORT] = { .type = NLA_U16 },
1842 [NDA_VNI] = { .type = NLA_U32 },
1843 [NDA_IFINDEX] = { .type = NLA_U32 },
1844 [NDA_MASTER] = { .type = NLA_U32 },
1845 [NDA_PROTOCOL] = { .type = NLA_U8 },
1846 [NDA_NH_ID] = { .type = NLA_U32 },
1847 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1848 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
1849};
1850
1851static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1852 struct netlink_ext_ack *extack)
1853{
1854 struct net *net = sock_net(skb->sk);
1855 struct ndmsg *ndm;
1856 struct nlattr *dst_attr;
1857 struct neigh_table *tbl;
1858 struct neighbour *neigh;
1859 struct net_device *dev = NULL;
1860 int err = -EINVAL;
1861
1862 ASSERT_RTNL();
1863 if (nlmsg_len(nlh) < sizeof(*ndm))
1864 goto out;
1865
1866 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1867 if (!dst_attr) {
1868 NL_SET_ERR_MSG(extack, "Network address not specified");
1869 goto out;
1870 }
1871
1872 ndm = nlmsg_data(nlh);
1873 if (ndm->ndm_ifindex) {
1874 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1875 if (dev == NULL) {
1876 err = -ENODEV;
1877 goto out;
1878 }
1879 }
1880
1881 tbl = neigh_find_table(ndm->ndm_family);
1882 if (tbl == NULL)
1883 return -EAFNOSUPPORT;
1884
1885 if (nla_len(dst_attr) < (int)tbl->key_len) {
1886 NL_SET_ERR_MSG(extack, "Invalid network address");
1887 goto out;
1888 }
1889
1890 if (ndm->ndm_flags & NTF_PROXY) {
1891 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1892 goto out;
1893 }
1894
1895 if (dev == NULL)
1896 goto out;
1897
1898 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1899 if (neigh == NULL) {
1900 err = -ENOENT;
1901 goto out;
1902 }
1903
1904 err = __neigh_update(neigh, NULL, NUD_FAILED,
1905 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1906 NETLINK_CB(skb).portid, extack);
1907 write_lock_bh(&tbl->lock);
1908 neigh_release(neigh);
1909 neigh_remove_one(neigh);
1910 write_unlock_bh(&tbl->lock);
1911
1912out:
1913 return err;
1914}
1915
1916static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1917 struct netlink_ext_ack *extack)
1918{
1919 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1920 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1921 struct net *net = sock_net(skb->sk);
1922 struct ndmsg *ndm;
1923 struct nlattr *tb[NDA_MAX+1];
1924 struct neigh_table *tbl;
1925 struct net_device *dev = NULL;
1926 struct neighbour *neigh;
1927 void *dst, *lladdr;
1928 u8 protocol = 0;
1929 u32 ndm_flags;
1930 int err;
1931
1932 ASSERT_RTNL();
1933 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1934 nda_policy, extack);
1935 if (err < 0)
1936 goto out;
1937
1938 err = -EINVAL;
1939 if (!tb[NDA_DST]) {
1940 NL_SET_ERR_MSG(extack, "Network address not specified");
1941 goto out;
1942 }
1943
1944 ndm = nlmsg_data(nlh);
1945 ndm_flags = ndm->ndm_flags;
1946 if (tb[NDA_FLAGS_EXT]) {
1947 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1948
1949 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1950 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1951 hweight32(NTF_EXT_MASK)));
1952 ndm_flags |= (ext << NTF_EXT_SHIFT);
1953 }
1954 if (ndm->ndm_ifindex) {
1955 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1956 if (dev == NULL) {
1957 err = -ENODEV;
1958 goto out;
1959 }
1960
1961 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1962 NL_SET_ERR_MSG(extack, "Invalid link address");
1963 goto out;
1964 }
1965 }
1966
1967 tbl = neigh_find_table(ndm->ndm_family);
1968 if (tbl == NULL)
1969 return -EAFNOSUPPORT;
1970
1971 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1972 NL_SET_ERR_MSG(extack, "Invalid network address");
1973 goto out;
1974 }
1975
1976 dst = nla_data(tb[NDA_DST]);
1977 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1978
1979 if (tb[NDA_PROTOCOL])
1980 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1981 if (ndm_flags & NTF_PROXY) {
1982 struct pneigh_entry *pn;
1983
1984 if (ndm_flags & NTF_MANAGED) {
1985 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
1986 goto out;
1987 }
1988
1989 err = -ENOBUFS;
1990 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1991 if (pn) {
1992 pn->flags = ndm_flags;
1993 if (protocol)
1994 pn->protocol = protocol;
1995 err = 0;
1996 }
1997 goto out;
1998 }
1999
2000 if (!dev) {
2001 NL_SET_ERR_MSG(extack, "Device not specified");
2002 goto out;
2003 }
2004
2005 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2006 err = -EINVAL;
2007 goto out;
2008 }
2009
2010 neigh = neigh_lookup(tbl, dst, dev);
2011 if (neigh == NULL) {
2012 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
2013 bool exempt_from_gc = ndm_permanent ||
2014 ndm_flags & NTF_EXT_LEARNED;
2015
2016 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2017 err = -ENOENT;
2018 goto out;
2019 }
2020 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2021 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2022 err = -EINVAL;
2023 goto out;
2024 }
2025
2026 neigh = ___neigh_create(tbl, dst, dev,
2027 ndm_flags &
2028 (NTF_EXT_LEARNED | NTF_MANAGED),
2029 exempt_from_gc, true);
2030 if (IS_ERR(neigh)) {
2031 err = PTR_ERR(neigh);
2032 goto out;
2033 }
2034 } else {
2035 if (nlh->nlmsg_flags & NLM_F_EXCL) {
2036 err = -EEXIST;
2037 neigh_release(neigh);
2038 goto out;
2039 }
2040
2041 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2042 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2043 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2044 }
2045
2046 if (protocol)
2047 neigh->protocol = protocol;
2048 if (ndm_flags & NTF_EXT_LEARNED)
2049 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2050 if (ndm_flags & NTF_ROUTER)
2051 flags |= NEIGH_UPDATE_F_ISROUTER;
2052 if (ndm_flags & NTF_MANAGED)
2053 flags |= NEIGH_UPDATE_F_MANAGED;
2054 if (ndm_flags & NTF_USE)
2055 flags |= NEIGH_UPDATE_F_USE;
2056
2057 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2058 NETLINK_CB(skb).portid, extack);
2059 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2060 neigh_event_send(neigh, NULL);
2061 err = 0;
2062 }
2063 neigh_release(neigh);
2064out:
2065 return err;
2066}
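
/* From user space this handler serves, for example:
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 \
 *		nud permanent
 *
 * which arrives as RTM_NEWNEIGH with NLM_F_CREATE | NLM_F_REPLACE, the
 * address in NDA_DST, the MAC in NDA_LLADDR, and NUD_PERMANENT in
 * ndm_state (making the entry exempt from gc, as handled above).
 */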
2067
2068static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2069{
2070 struct nlattr *nest;
2071
2072 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2073 if (nest == NULL)
2074 return -ENOBUFS;
2075
2076 if ((parms->dev &&
2077 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2078 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2079 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2080 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2081	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2082 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2083 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2084 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2085 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2086 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2087 NEIGH_VAR(parms, UCAST_PROBES)) ||
2088 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2089 NEIGH_VAR(parms, MCAST_PROBES)) ||
2090 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2091 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2092 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2093 NDTPA_PAD) ||
2094 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2095 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2096 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2097 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2098 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2099 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2100 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2101 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2102 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2103 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2104 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2105 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2106 nla_put_msecs(skb, NDTPA_LOCKTIME,
2107 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2108 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2109 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2110 goto nla_put_failure;
2111 return nla_nest_end(skb, nest);
2112
2113nla_put_failure:
2114 nla_nest_cancel(skb, nest);
2115 return -EMSGSIZE;
2116}
2117
2118static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2119 u32 pid, u32 seq, int type, int flags)
2120{
2121 struct nlmsghdr *nlh;
2122 struct ndtmsg *ndtmsg;
2123
2124 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2125 if (nlh == NULL)
2126 return -EMSGSIZE;
2127
2128 ndtmsg = nlmsg_data(nlh);
2129
2130 read_lock_bh(&tbl->lock);
2131 ndtmsg->ndtm_family = tbl->family;
2132 ndtmsg->ndtm_pad1 = 0;
2133 ndtmsg->ndtm_pad2 = 0;
2134
2135 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2136 nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2137 NDTA_PAD) ||
2138 nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2139 nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2140 nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2141 goto nla_put_failure;
2142 {
2143 unsigned long now = jiffies;
2144 long flush_delta = now - READ_ONCE(tbl->last_flush);
2145 long rand_delta = now - READ_ONCE(tbl->last_rand);
2146 struct neigh_hash_table *nht;
2147 struct ndt_config ndc = {
2148 .ndtc_key_len = tbl->key_len,
2149 .ndtc_entry_size = tbl->entry_size,
2150 .ndtc_entries = atomic_read(&tbl->entries),
2151 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2152 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2153 .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
2154 };
2155
2156 rcu_read_lock();
2157 nht = rcu_dereference(tbl->nht);
2158 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2159 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2160 rcu_read_unlock();
2161
2162 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2163 goto nla_put_failure;
2164 }
2165
2166 {
2167 int cpu;
2168 struct ndt_stats ndst;
2169
2170 memset(&ndst, 0, sizeof(ndst));
2171
2172 for_each_possible_cpu(cpu) {
2173 struct neigh_statistics *st;
2174
2175 st = per_cpu_ptr(tbl->stats, cpu);
2176 ndst.ndts_allocs += READ_ONCE(st->allocs);
2177 ndst.ndts_destroys += READ_ONCE(st->destroys);
2178 ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
2179 ndst.ndts_res_failed += READ_ONCE(st->res_failed);
2180 ndst.ndts_lookups += READ_ONCE(st->lookups);
2181 ndst.ndts_hits += READ_ONCE(st->hits);
2182 ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
2183 ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
2184 ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
2185 ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
2186 ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
2187 }
2188
2189 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2190 NDTA_PAD))
2191 goto nla_put_failure;
2192 }
2193
2194 BUG_ON(tbl->parms.dev);
2195 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2196 goto nla_put_failure;
2197
2198 read_unlock_bh(&tbl->lock);
2199 nlmsg_end(skb, nlh);
2200 return 0;
2201
2202nla_put_failure:
2203 read_unlock_bh(&tbl->lock);
2204 nlmsg_cancel(skb, nlh);
2205 return -EMSGSIZE;
2206}
2207
2208static int neightbl_fill_param_info(struct sk_buff *skb,
2209 struct neigh_table *tbl,
2210 struct neigh_parms *parms,
2211 u32 pid, u32 seq, int type,
2212 unsigned int flags)
2213{
2214 struct ndtmsg *ndtmsg;
2215 struct nlmsghdr *nlh;
2216
2217 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2218 if (nlh == NULL)
2219 return -EMSGSIZE;
2220
2221 ndtmsg = nlmsg_data(nlh);
2222
2223 read_lock_bh(&tbl->lock);
2224 ndtmsg->ndtm_family = tbl->family;
2225 ndtmsg->ndtm_pad1 = 0;
2226 ndtmsg->ndtm_pad2 = 0;
2227
2228 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2229 neightbl_fill_parms(skb, parms) < 0)
2230 goto errout;
2231
2232 read_unlock_bh(&tbl->lock);
2233 nlmsg_end(skb, nlh);
2234 return 0;
2235errout:
2236 read_unlock_bh(&tbl->lock);
2237 nlmsg_cancel(skb, nlh);
2238 return -EMSGSIZE;
2239}
2240
2241static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2242 [NDTA_NAME] = { .type = NLA_STRING },
2243 [NDTA_THRESH1] = { .type = NLA_U32 },
2244 [NDTA_THRESH2] = { .type = NLA_U32 },
2245 [NDTA_THRESH3] = { .type = NLA_U32 },
2246 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2247 [NDTA_PARMS] = { .type = NLA_NESTED },
2248};
2249
2250static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2251 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2252 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2253 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2254 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2255 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2256 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2257 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2258 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2259 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2260 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2261 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2262 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2263 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2264 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2265 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 },
2266};

static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise the change
				 * will only take effect the next time neigh_periodic_work
				 * decides to recompute it (which can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_INTERVAL_PROBE_TIME_MS:
				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));

	if (tb[NDTA_THRESH2])
		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));

	if (tb[NDTA_THRESH3])
		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));

	if (tb[NDTA_GC_INTERVAL])
		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}

static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
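
/*
 * Dump resume bookkeeping: cb->args[0] holds the table index and
 * cb->args[1] the per-table parms index to resume from when a multi-part
 * dump fills the skb. As a hedged example (iproute2 invocation, not part
 * of this file), "ip ntable show" is expected to exercise this path,
 * receiving one RTM_NEWNEIGHTBL message per table plus one per device
 * parameter set.
 */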

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags     = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh_flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	u32 neigh_flags, neigh_flags_ext;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
	neigh_flags     = pn->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}

static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = dev ? netdev_master_upper_dev_get_rcu(dev) : NULL;

	/* 0 already denotes that NDA_MASTER wasn't passed, so another
	 * invalid ifindex value is needed to denote "no master".
	 */
	if (master_idx == -1)
		return !!master;

	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}
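
/*
 * Hedged example (iproute2 invocation, not part of this file): a command
 * such as "ip neigh show master br0" is expected to pass br0's ifindex in
 * NDA_MASTER, so entries whose device is not enslaved to br0 are filtered
 * out above.
 */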

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && (!dev || dev->ifindex != filter_idx))
		return true;

	return false;
}

struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int err = 0, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	nht = rcu_dereference(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		idx = 0;
		neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[h]) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      RTM_NEWNEIGH, flags);
			if (err < 0)
				goto out;
next:
			idx++;
		}
	}
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return err;
}

static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int err = 0, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq,
					       RTM_NEWNEIGH, flags, tbl);
			if (err < 0) {
				read_unlock_bh(&tbl->lock);
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return err;
}

static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for a full ndmsg structure; the family member sits at the
	 * same offset in both ndmsg and rtgenmsg
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;
	err = 0;

	s_t = cb->args[0];

	rcu_read_lock();
	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = rcu_dereference(neigh_tables[t]);

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}
	rcu_read_unlock();

	cb->args[0] = t;
	return err;
}
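
/*
 * Hedged example (iproute2 invocation, not part of this file): "ip neigh
 * show proxy" is expected to set NTF_PROXY in ndm_flags, steering the
 * dump through pneigh_dump_table() instead of neigh_dump_table().
 */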

static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

static int neigh_get_reply(struct net *net, struct neighbour *neigh,
			   u32 pid, u32 seq)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
			    u32 pid, u32 seq, struct neigh_table *tbl)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
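
/*
 * Hedged example (iproute2 invocation, not part of this file): "ip neigh
 * get 192.0.2.1 dev eth0" is expected to arrive here as RTM_GETNEIGH with
 * NDA_DST and ndm_ifindex set, and to be answered with a single unicast
 * RTM_NEWNEIGH message.
 */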

void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock();
	nht = rcu_dereference(tbl->nht);

	read_lock_bh(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(neigh_for_each);
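
/*
 * Usage sketch (hypothetical, not part of this file): count the entries
 * in a table with a trivial callback, e.g. for the ARP table:
 *
 *	static void neigh_count_cb(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *
 *	neigh_for_each(&arp_tbl, neigh_count_cb, &count);
 *
 * The callback runs under tbl->lock with BHs disabled, so it must not
 * sleep or try to take tbl->lock again.
 */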

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	struct neigh_hash_table *nht;
	int chain;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct hlist_node *tmp;
		struct neighbour *n;

		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[chain]) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				hlist_del_rcu(&n->hash);
				hlist_del_rcu(&n->dev_list);
				neigh_mark_dead(n);
			}
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);

int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;

	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		rcu_read_lock();
		tbl = rcu_dereference(neigh_tables[index]);
		if (!tbl)
			goto out_unlock;
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock();
			goto out_kfree_skb;
		}
		err = READ_ONCE(neigh->output)(neigh, skb);
out_unlock:
		rcu_read_unlock();
	} else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
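
/*
 * Hedged usage note: neigh_xmit() lets protocols without their own
 * neighbour state transmit through an arbitrary table; e.g. (to the best
 * of our knowledge) the MPLS output path resolves its next hop roughly as
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &nexthop_v4_addr, skb);
 *
 * where nexthop_v4_addr is a hypothetical __be32 next-hop address. On
 * lookup or creation failure the skb is freed internally.
 */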

#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_valid(struct seq_file *seq,
					 struct neighbour *n,
					 loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	if (!net_eq(dev_net(n->dev), net))
		return NULL;

	if (state->neigh_sub_iter) {
		loff_t fakep = 0;
		void *v;

		v = state->neigh_sub_iter(state, n, pos ? pos : &fakep);
		if (!v)
			return NULL;
		if (pos)
			return v;
	}

	if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
		return n;

	if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
		return n;

	return NULL;
}

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n, *tmp;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;

	while (++state->bucket < (1 << nht->hash_shift)) {
		neigh_for_each_in_bucket(n, &nht->hash_heads[state->bucket]) {
			tmp = neigh_get_valid(seq, n, NULL);
			if (tmp)
				return tmp;
		}
	}

	return NULL;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neighbour *tmp;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);

		if (v)
			return n;
	}

	hlist_for_each_entry_continue(n, hash) {
		tmp = neigh_get_valid(seq, n, pos);
		if (tmp) {
			n = tmp;
			goto out;
		}
	}

	n = neigh_get_first(seq);
out:
	if (n && pos)
		--(*pos);

	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = -1;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock();
	state->nht = rcu_dereference(tbl->nht);
	read_lock_bh(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(neigh_seq_stop);
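
/*
 * Hedged usage note: these three helpers implement the start/next/stop
 * half of a seq_file iterator over one neighbour table; e.g. the
 * /proc/net/arp reader in net/ipv4/arp.c is expected to start with
 * something like
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *
 * so NUD_NOARP entries never show up in the ARP listing.
 */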

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx "
			"%08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
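
/*
 * These counters are exposed under /proc/net/stat/ (e.g. arp_cache and,
 * on IPv6 builds, ndisc_cache), one row per possible CPU; the first
 * column, entries, is the table-wide total and repeats on every row.
 */
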
#endif /* CONFIG_PROC_FS */

static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;

	rcu_read_lock();
	net = dev_net_rcu(n->dev);
	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	goto out;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
out:
	rcu_read_unlock();
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(const struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
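
/*
 * Worked example of the conversion above: unres_qlen is expressed in
 * packets but stored as unres_qlen_bytes. Writing unres_qlen = 10 stores
 * 10 * SKB_TRUESIZE(ETH_FRAME_LEN) bytes (roughly 20KB, since
 * SKB_TRUESIZE(1514) is on the order of 2KB once skb overhead is added);
 * reads perform the inverse division, so a read-back may differ from
 * what was written if unres_qlen_bytes was set directly.
 */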

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(const struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
					   void *buffer, size_t *lenp,
					   loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = SYSCTL_INT_MAX;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
						   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	int min = msecs_to_jiffies(1);

	tmp.extra1 = &min;
	tmp.extra2 = NULL;

	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
					      void *buffer, size_t *lenp,
					      loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise the change will
		 * only take effect the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
						       "interval_probe_time_ms"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};
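
/*
 * The template above is cloned per family and per device by
 * neigh_sysctl_register() below, so e.g. mcast_solicit for the default
 * IPv4 parameters appears as /proc/sys/net/ipv4/neigh/default/mcast_solicit
 * and, for a device eth0 (hypothetical name), as
 * /proc/sys/net/ipv4/neigh/eth0/mcast_solicit.
 */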

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;
	size_t neigh_vars_size;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers update p->reachable_time after
		 * base_reachable_time(_ms) is set, so that the new timer
		 * takes effect at the next neighbour update instead of
		 * waiting for neigh_periodic_work to recompute it (which
		 * can take multiple minutes). Any handler that replaces
		 * them should do the same.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
						  neigh_path, t->neigh_vars,
						  neigh_vars_size);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
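
/*
 * Hedged usage sketch: a protocol typically registers its default parms
 * with no device and no custom handler, e.g. ARP is expected to do
 * roughly
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
 *
 * which creates net/ipv4/neigh/default/ including the gc_* knobs, while
 * per-device registrations terminate the table before NEIGH_VAR_GC_INTERVAL.
 */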

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
	{.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
	{.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
	{.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
	{.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
};

static int __init neigh_init(void)
{
	rtnl_register_many(neigh_rtnl_msg_handlers);
	return 0;
}

subsys_initcall(neigh_init);