// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
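
/* A minimal sketch of the pattern the rules above imply (illustrative,
 * not part of the original file; do_slow_work() is a hypothetical helper):
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <entry found during a bucket scan>;
 *	neigh_hold(n);			// refcount now prevents destruction
 *	write_unlock_bh(&tbl->lock);	// safe to call out from here
 *	do_slow_work(n);		// may notify, transmit, etc.
 *	neigh_release(n);
 */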

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
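
/* Worked example (illustrative, not from the original file): with the
 * default base_reachable_time of 30 * HZ, the expression above yields a
 * value uniformly distributed in [15 * HZ, 45 * HZ), i.e. reachable_time
 * is re-randomized to between 15 and 45 seconds.
 */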

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
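
/* Illustrative numbers (assuming the IPv4 arp_tbl defaults of
 * gc_thresh1 = 128, gc_thresh2 = 512, gc_thresh3 = 1024, and the usual
 * thresh1 < thresh2 < thresh3 ordering): below 512 GC-eligible entries
 * an allocation never triggers a forced GC; between 512 and 1023 a
 * forced GC runs at most once every 5 seconds; at 1024 and above the
 * allocation fails with "neighbor table overflow!" unless the forced GC
 * manages to free at least one entry.
 */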

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
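
/* Worked example (illustrative, not part of the original file): for an
 * IPv4 proxy entry key_len is 4, so hash_val starts out as the address
 * itself; the xor-folds by 16, 8 and 4 collapse all 32 bits into the
 * low nibble, and the PNEIGH_HASHMASK (0xF) mask then selects one of
 * the 16 phash_buckets.
 */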

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
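
/* Note (summary, not in the original file): neigh_suspect() and
 * neigh_connect() switch the per-entry transmit hook between two paths:
 * ops->connected_output (typically neigh_connected_output) trusts the
 * cached lladdr and just fills in the header, while ops->output
 * (typically neigh_resolve_output) goes through neigh_event_send() to
 * (re)validate the entry first.
 */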

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;
	/* This is a very delicate place. report_unreachable() is a very
	   complicated routine. In particular, it can hit this same
	   neighbour entry!

	   So we try to be careful and avoid dead loops. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */
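
/* Summary of the transitions handled below (descriptive, not from the
 * original file): REACHABLE falls back to STALE (or to DELAY if the
 * entry was used recently) once its confirmation ages out; DELAY either
 * returns to REACHABLE on a fresh confirmation or escalates to PROBE;
 * INCOMPLETE and PROBE retransmit solicitations until neigh_max_probes()
 * is exceeded, at which point the entry becomes FAILED and its queued
 * packets are error-reported and dropped via neigh_invalidate().
 */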

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
   (A usage sketch follows neigh_update() below.)
 */

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * received an ARP packet, even if it doesn't change the IP to MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update the timestamp only once we know we will make a change to
	 * the neighbour entry. Otherwise we risk moving the locktime window
	 * with noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
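
/* Usage sketch (illustrative, not part of the original file): an
 * administrative update that installs a resolved link-layer address,
 * in the style of the netlink handlers below.  "ha" is a hypothetical
 * buffer of dev->addr_len bytes; the caller must hold a reference on
 * "neigh", e.g. one obtained from neigh_lookup().
 *
 *	err = neigh_update(neigh, ha, NUD_REACHABLE,
 *			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
 *			   0);
 *	neigh_release(neigh);
 */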

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
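
/* Note (summary, not in the original file): both output paths above read
 * neigh->ha under the ha_lock seqlock.  The writer side is in
 * __neigh_update(), which takes write_seqlock(&neigh->ha_lock) while
 * memcpy()ing the new link-layer address, so a reader that races with an
 * address change simply retries dev_hard_header() with a consistent ha.
 */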
1533
1534int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1535{
1536 return dev_queue_xmit(skb);
1537}
1538EXPORT_SYMBOL(neigh_direct_output);
1539
1540static void neigh_proxy_process(struct timer_list *t)
1541{
1542 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1543 long sched_next = 0;
1544 unsigned long now = jiffies;
1545 struct sk_buff *skb, *n;
1546
1547 spin_lock(&tbl->proxy_queue.lock);
1548
1549 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1550 long tdif = NEIGH_CB(skb)->sched_next - now;
1551
1552 if (tdif <= 0) {
1553 struct net_device *dev = skb->dev;
1554
1555 __skb_unlink(skb, &tbl->proxy_queue);
1556 if (tbl->proxy_redo && netif_running(dev)) {
1557 rcu_read_lock();
1558 tbl->proxy_redo(skb);
1559 rcu_read_unlock();
1560 } else {
1561 kfree_skb(skb);
1562 }
1563
1564 dev_put(dev);
1565 } else if (!sched_next || tdif < sched_next)
1566 sched_next = tdif;
1567 }
1568 del_timer(&tbl->proxy_timer);
1569 if (sched_next)
1570 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1571 spin_unlock(&tbl->proxy_queue.lock);
1572}
1573
1574void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1575 struct sk_buff *skb)
1576{
1577 unsigned long sched_next = jiffies +
1578 prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
1579
1580 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1581 kfree_skb(skb);
1582 return;
1583 }
1584
1585 NEIGH_CB(skb)->sched_next = sched_next;
1586 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1587
1588 spin_lock(&tbl->proxy_queue.lock);
1589 if (del_timer(&tbl->proxy_timer)) {
1590 if (time_before(tbl->proxy_timer.expires, sched_next))
1591 sched_next = tbl->proxy_timer.expires;
1592 }
1593 skb_dst_drop(skb);
1594 dev_hold(skb->dev);
1595 __skb_queue_tail(&tbl->proxy_queue, skb);
1596 mod_timer(&tbl->proxy_timer, sched_next);
1597 spin_unlock(&tbl->proxy_queue.lock);
1598}
1599EXPORT_SYMBOL(pneigh_enqueue);
1600
1601static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1602 struct net *net, int ifindex)
1603{
1604 struct neigh_parms *p;
1605
1606 list_for_each_entry(p, &tbl->parms_list, list) {
1607 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1608 (!p->dev && !ifindex && net_eq(net, &init_net)))
1609 return p;
1610 }
1611
1612 return NULL;
1613}
1614
1615struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1616 struct neigh_table *tbl)
1617{
1618 struct neigh_parms *p;
1619 struct net *net = dev_net(dev);
1620 const struct net_device_ops *ops = dev->netdev_ops;
1621
1622 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1623 if (p) {
1624 p->tbl = tbl;
1625 refcount_set(&p->refcnt, 1);
1626 p->reachable_time =
1627 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1628 dev_hold(dev);
1629 p->dev = dev;
1630 write_pnet(&p->net, net);
1631 p->sysctl_table = NULL;
1632
1633 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1634 dev_put(dev);
1635 kfree(p);
1636 return NULL;
1637 }
1638
1639 write_lock_bh(&tbl->lock);
1640 list_add(&p->list, &tbl->parms.list);
1641 write_unlock_bh(&tbl->lock);
1642
1643 neigh_parms_data_state_cleanall(p);
1644 }
1645 return p;
1646}
1647EXPORT_SYMBOL(neigh_parms_alloc);
1648
1649static void neigh_rcu_free_parms(struct rcu_head *head)
1650{
1651 struct neigh_parms *parms =
1652 container_of(head, struct neigh_parms, rcu_head);
1653
1654 neigh_parms_put(parms);
1655}
1656
1657void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1658{
1659 if (!parms || parms == &tbl->parms)
1660 return;
1661 write_lock_bh(&tbl->lock);
1662 list_del(&parms->list);
1663 parms->dead = 1;
1664 write_unlock_bh(&tbl->lock);
1665 if (parms->dev)
1666 dev_put(parms->dev);
1667 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1668}
1669EXPORT_SYMBOL(neigh_parms_release);
1670
1671static void neigh_parms_destroy(struct neigh_parms *parms)
1672{
1673 kfree(parms);
1674}
1675
1676static struct lock_class_key neigh_table_proxy_queue_class;
1677
1678static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1679
1680void neigh_table_init(int index, struct neigh_table *tbl)
1681{
1682 unsigned long now = jiffies;
1683 unsigned long phsize;
1684
1685 INIT_LIST_HEAD(&tbl->parms_list);
1686 INIT_LIST_HEAD(&tbl->gc_list);
1687 list_add(&tbl->parms.list, &tbl->parms_list);
1688 write_pnet(&tbl->parms.net, &init_net);
1689 refcount_set(&tbl->parms.refcnt, 1);
1690 tbl->parms.reachable_time =
1691 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1692
1693 tbl->stats = alloc_percpu(struct neigh_statistics);
1694 if (!tbl->stats)
1695 panic("cannot create neighbour cache statistics");
1696
1697#ifdef CONFIG_PROC_FS
1698 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1699 &neigh_stat_seq_ops, tbl))
1700 panic("cannot create neighbour proc dir entry");
1701#endif
1702
1703 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1704
1705 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1706 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1707
1708 if (!tbl->nht || !tbl->phash_buckets)
1709 panic("cannot allocate neighbour cache hashes");
1710
1711 if (!tbl->entry_size)
1712 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1713 tbl->key_len, NEIGH_PRIV_ALIGN);
1714 else
1715 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1716
1717 rwlock_init(&tbl->lock);
1718 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1719 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1720 tbl->parms.reachable_time);
1721 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1722 skb_queue_head_init_class(&tbl->proxy_queue,
1723 &neigh_table_proxy_queue_class);
1724
1725 tbl->last_flush = now;
1726 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1727
1728 neigh_tables[index] = tbl;
1729}
1730EXPORT_SYMBOL(neigh_table_init);
1731
1732int neigh_table_clear(int index, struct neigh_table *tbl)
1733{
1734 neigh_tables[index] = NULL;
1735 /* It is not clean... Fix it to unload IPv6 module safely */
1736 cancel_delayed_work_sync(&tbl->gc_work);
1737 del_timer_sync(&tbl->proxy_timer);
1738 pneigh_queue_purge(&tbl->proxy_queue);
1739 neigh_ifdown(tbl, NULL);
1740 if (atomic_read(&tbl->entries))
1741 pr_crit("neighbour leakage\n");
1742
1743 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1744 neigh_hash_free_rcu);
1745 tbl->nht = NULL;
1746
1747 kfree(tbl->phash_buckets);
1748 tbl->phash_buckets = NULL;
1749
1750 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1751
1752 free_percpu(tbl->stats);
1753 tbl->stats = NULL;
1754
1755 return 0;
1756}
1757EXPORT_SYMBOL(neigh_table_clear);
1758
1759static struct neigh_table *neigh_find_table(int family)
1760{
1761 struct neigh_table *tbl = NULL;
1762
1763 switch (family) {
1764 case AF_INET:
1765 tbl = neigh_tables[NEIGH_ARP_TABLE];
1766 break;
1767 case AF_INET6:
1768 tbl = neigh_tables[NEIGH_ND_TABLE];
1769 break;
1770 case AF_DECnet:
1771 tbl = neigh_tables[NEIGH_DN_TABLE];
1772 break;
1773 }
1774
1775 return tbl;
1776}
1777
1778const struct nla_policy nda_policy[NDA_MAX+1] = {
1779 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1780 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1781 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1782 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1783 [NDA_PROBES] = { .type = NLA_U32 },
1784 [NDA_VLAN] = { .type = NLA_U16 },
1785 [NDA_PORT] = { .type = NLA_U16 },
1786 [NDA_VNI] = { .type = NLA_U32 },
1787 [NDA_IFINDEX] = { .type = NLA_U32 },
1788 [NDA_MASTER] = { .type = NLA_U32 },
1789 [NDA_PROTOCOL] = { .type = NLA_U8 },
1790 [NDA_NH_ID] = { .type = NLA_U32 },
1791 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
1792};
1793
1794static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1795 struct netlink_ext_ack *extack)
1796{
1797 struct net *net = sock_net(skb->sk);
1798 struct ndmsg *ndm;
1799 struct nlattr *dst_attr;
1800 struct neigh_table *tbl;
1801 struct neighbour *neigh;
1802 struct net_device *dev = NULL;
1803 int err = -EINVAL;
1804
1805 ASSERT_RTNL();
1806 if (nlmsg_len(nlh) < sizeof(*ndm))
1807 goto out;
1808
1809 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1810 if (!dst_attr) {
1811 NL_SET_ERR_MSG(extack, "Network address not specified");
1812 goto out;
1813 }
1814
1815 ndm = nlmsg_data(nlh);
1816 if (ndm->ndm_ifindex) {
1817 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1818 if (dev == NULL) {
1819 err = -ENODEV;
1820 goto out;
1821 }
1822 }
1823
1824 tbl = neigh_find_table(ndm->ndm_family);
1825 if (tbl == NULL)
1826 return -EAFNOSUPPORT;
1827
1828 if (nla_len(dst_attr) < (int)tbl->key_len) {
1829 NL_SET_ERR_MSG(extack, "Invalid network address");
1830 goto out;
1831 }
1832
1833 if (ndm->ndm_flags & NTF_PROXY) {
1834 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1835 goto out;
1836 }
1837
1838 if (dev == NULL)
1839 goto out;
1840
1841 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1842 if (neigh == NULL) {
1843 err = -ENOENT;
1844 goto out;
1845 }
1846
1847 err = __neigh_update(neigh, NULL, NUD_FAILED,
1848 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1849 NETLINK_CB(skb).portid, extack);
1850 write_lock_bh(&tbl->lock);
1851 neigh_release(neigh);
1852 neigh_remove_one(neigh, tbl);
1853 write_unlock_bh(&tbl->lock);
1854
1855out:
1856 return err;
1857}
1858
1859static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1860 struct netlink_ext_ack *extack)
1861{
1862 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1863 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1864 struct net *net = sock_net(skb->sk);
1865 struct ndmsg *ndm;
1866 struct nlattr *tb[NDA_MAX+1];
1867 struct neigh_table *tbl;
1868 struct net_device *dev = NULL;
1869 struct neighbour *neigh;
1870 void *dst, *lladdr;
1871 u8 protocol = 0;
1872 int err;
1873
1874 ASSERT_RTNL();
1875 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1876 nda_policy, extack);
1877 if (err < 0)
1878 goto out;
1879
1880 err = -EINVAL;
1881 if (!tb[NDA_DST]) {
1882 NL_SET_ERR_MSG(extack, "Network address not specified");
1883 goto out;
1884 }
1885
1886 ndm = nlmsg_data(nlh);
1887 if (ndm->ndm_ifindex) {
1888 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1889 if (dev == NULL) {
1890 err = -ENODEV;
1891 goto out;
1892 }
1893
1894 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1895 NL_SET_ERR_MSG(extack, "Invalid link address");
1896 goto out;
1897 }
1898 }
1899
1900 tbl = neigh_find_table(ndm->ndm_family);
1901 if (tbl == NULL)
1902 return -EAFNOSUPPORT;
1903
1904 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1905 NL_SET_ERR_MSG(extack, "Invalid network address");
1906 goto out;
1907 }
1908
1909 dst = nla_data(tb[NDA_DST]);
1910 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1911
1912 if (tb[NDA_PROTOCOL])
1913 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1914
1915 if (ndm->ndm_flags & NTF_PROXY) {
1916 struct pneigh_entry *pn;
1917
1918 err = -ENOBUFS;
1919 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1920 if (pn) {
1921 pn->flags = ndm->ndm_flags;
1922 if (protocol)
1923 pn->protocol = protocol;
1924 err = 0;
1925 }
1926 goto out;
1927 }
1928
1929 if (!dev) {
1930 NL_SET_ERR_MSG(extack, "Device not specified");
1931 goto out;
1932 }
1933
1934 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1935 err = -EINVAL;
1936 goto out;
1937 }
1938
1939 neigh = neigh_lookup(tbl, dst, dev);
1940 if (neigh == NULL) {
1941 bool exempt_from_gc;
1942
1943 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1944 err = -ENOENT;
1945 goto out;
1946 }
1947
1948 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1949 ndm->ndm_flags & NTF_EXT_LEARNED;
1950 neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
1951 if (IS_ERR(neigh)) {
1952 err = PTR_ERR(neigh);
1953 goto out;
1954 }
1955 } else {
1956 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1957 err = -EEXIST;
1958 neigh_release(neigh);
1959 goto out;
1960 }
1961
1962 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1963 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1964 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1965 }
1966
1967 if (protocol)
1968 neigh->protocol = protocol;
1969
1970 if (ndm->ndm_flags & NTF_EXT_LEARNED)
1971 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1972
1973 if (ndm->ndm_flags & NTF_ROUTER)
1974 flags |= NEIGH_UPDATE_F_ISROUTER;
1975
1976 if (ndm->ndm_flags & NTF_USE) {
1977 neigh_event_send(neigh, NULL);
1978 err = 0;
1979 } else
1980 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1981 NETLINK_CB(skb).portid, extack);
1982
1983 neigh_release(neigh);
1984
1985out:
1986 return err;
1987}
1988
1989static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1990{
1991 struct nlattr *nest;
1992
1993 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
1994 if (nest == NULL)
1995 return -ENOBUFS;
1996
1997 if ((parms->dev &&
1998 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1999 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2000 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2001 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2002 /* approximative value for deprecated QUEUE_LEN (in packets) */
2003 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2004 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2005 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2006 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2007 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2008 NEIGH_VAR(parms, UCAST_PROBES)) ||
2009 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2010 NEIGH_VAR(parms, MCAST_PROBES)) ||
2011 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2012 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2013 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2014 NDTPA_PAD) ||
2015 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2016 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2017 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2018 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2019 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2020 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2021 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2022 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2023 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2024 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2025 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2026 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2027 nla_put_msecs(skb, NDTPA_LOCKTIME,
2028 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2029 goto nla_put_failure;
2030 return nla_nest_end(skb, nest);
2031
2032nla_put_failure:
2033 nla_nest_cancel(skb, nest);
2034 return -EMSGSIZE;
2035}
2036
2037static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2038 u32 pid, u32 seq, int type, int flags)
2039{
2040 struct nlmsghdr *nlh;
2041 struct ndtmsg *ndtmsg;
2042
2043 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2044 if (nlh == NULL)
2045 return -EMSGSIZE;
2046
2047 ndtmsg = nlmsg_data(nlh);
2048
2049 read_lock_bh(&tbl->lock);
2050 ndtmsg->ndtm_family = tbl->family;
2051 ndtmsg->ndtm_pad1 = 0;
2052 ndtmsg->ndtm_pad2 = 0;
2053
2054 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2055 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2056 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2057 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2058 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2059 goto nla_put_failure;
2060 {
2061 unsigned long now = jiffies;
2062 long flush_delta = now - tbl->last_flush;
2063 long rand_delta = now - tbl->last_rand;
2064 struct neigh_hash_table *nht;
2065 struct ndt_config ndc = {
2066 .ndtc_key_len = tbl->key_len,
2067 .ndtc_entry_size = tbl->entry_size,
2068 .ndtc_entries = atomic_read(&tbl->entries),
2069 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2070 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2071 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2072 };
2073
2074 rcu_read_lock_bh();
2075 nht = rcu_dereference_bh(tbl->nht);
2076 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2077 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2078 rcu_read_unlock_bh();
2079
2080 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2081 goto nla_put_failure;
2082 }
2083
2084 {
2085 int cpu;
2086 struct ndt_stats ndst;
2087
2088 memset(&ndst, 0, sizeof(ndst));
2089
2090 for_each_possible_cpu(cpu) {
2091 struct neigh_statistics *st;
2092
2093 st = per_cpu_ptr(tbl->stats, cpu);
2094 ndst.ndts_allocs += st->allocs;
2095 ndst.ndts_destroys += st->destroys;
2096 ndst.ndts_hash_grows += st->hash_grows;
2097 ndst.ndts_res_failed += st->res_failed;
2098 ndst.ndts_lookups += st->lookups;
2099 ndst.ndts_hits += st->hits;
2100 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2101 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2102 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2103 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
2104 ndst.ndts_table_fulls += st->table_fulls;
2105 }
2106
2107 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2108 NDTA_PAD))
2109 goto nla_put_failure;
2110 }
2111
2112 BUG_ON(tbl->parms.dev);
2113 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2114 goto nla_put_failure;
2115
2116 read_unlock_bh(&tbl->lock);
2117 nlmsg_end(skb, nlh);
2118 return 0;
2119
2120nla_put_failure:
2121 read_unlock_bh(&tbl->lock);
2122 nlmsg_cancel(skb, nlh);
2123 return -EMSGSIZE;
2124}
2125
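/* Variant of neightbl_fill_info() that dumps only the table header and a
 * single neigh_parms instance, used for per-device parameters.
 */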
2126static int neightbl_fill_param_info(struct sk_buff *skb,
2127 struct neigh_table *tbl,
2128 struct neigh_parms *parms,
2129 u32 pid, u32 seq, int type,
2130 unsigned int flags)
2131{
2132 struct ndtmsg *ndtmsg;
2133 struct nlmsghdr *nlh;
2134
2135 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2136 if (nlh == NULL)
2137 return -EMSGSIZE;
2138
2139 ndtmsg = nlmsg_data(nlh);
2140
2141 read_lock_bh(&tbl->lock);
2142 ndtmsg->ndtm_family = tbl->family;
2143 ndtmsg->ndtm_pad1 = 0;
2144 ndtmsg->ndtm_pad2 = 0;
2145
2146 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2147 neightbl_fill_parms(skb, parms) < 0)
2148 goto errout;
2149
2150 read_unlock_bh(&tbl->lock);
2151 nlmsg_end(skb, nlh);
2152 return 0;
2153errout:
2154 read_unlock_bh(&tbl->lock);
2155 nlmsg_cancel(skb, nlh);
2156 return -EMSGSIZE;
2157}
2158
2159static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2160 [NDTA_NAME] = { .type = NLA_STRING },
2161 [NDTA_THRESH1] = { .type = NLA_U32 },
2162 [NDTA_THRESH2] = { .type = NLA_U32 },
2163 [NDTA_THRESH3] = { .type = NLA_U32 },
2164 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2165 [NDTA_PARMS] = { .type = NLA_NESTED },
2166};
2167
2168static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2169 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2170 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2171 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2172 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2173 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2174 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2175 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2176 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2177 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2178 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2179 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2180 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2181 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2182 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2183};
2184
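/* RTM_SETNEIGHTBL handler: find the table by NDTA_NAME and family, then
 * update the requested neigh_parms values under tbl->lock.  The gc
 * thresholds and interval may only be changed from init_net.
 */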
2185static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2186 struct netlink_ext_ack *extack)
2187{
2188 struct net *net = sock_net(skb->sk);
2189 struct neigh_table *tbl;
2190 struct ndtmsg *ndtmsg;
2191 struct nlattr *tb[NDTA_MAX+1];
2192 bool found = false;
2193 int err, tidx;
2194
2195 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2196 nl_neightbl_policy, extack);
2197 if (err < 0)
2198 goto errout;
2199
2200 if (tb[NDTA_NAME] == NULL) {
2201 err = -EINVAL;
2202 goto errout;
2203 }
2204
2205 ndtmsg = nlmsg_data(nlh);
2206
2207 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2208 tbl = neigh_tables[tidx];
2209 if (!tbl)
2210 continue;
2211 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2212 continue;
2213 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2214 found = true;
2215 break;
2216 }
2217 }
2218
2219 if (!found)
2220 return -ENOENT;
2221
2222 /*
2223 * We acquire tbl->lock to be nice to the periodic timers and
2224 * make sure they always see a consistent set of values.
2225 */
2226 write_lock_bh(&tbl->lock);
2227
2228 if (tb[NDTA_PARMS]) {
2229 struct nlattr *tbp[NDTPA_MAX+1];
2230 struct neigh_parms *p;
2231 int i, ifindex = 0;
2232
2233 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2234 tb[NDTA_PARMS],
2235 nl_ntbl_parm_policy, extack);
2236 if (err < 0)
2237 goto errout_tbl_lock;
2238
2239 if (tbp[NDTPA_IFINDEX])
2240 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2241
2242 p = lookup_neigh_parms(tbl, net, ifindex);
2243 if (p == NULL) {
2244 err = -ENOENT;
2245 goto errout_tbl_lock;
2246 }
2247
2248 for (i = 1; i <= NDTPA_MAX; i++) {
2249 if (tbp[i] == NULL)
2250 continue;
2251
2252 switch (i) {
2253 case NDTPA_QUEUE_LEN:
2254 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2255 nla_get_u32(tbp[i]) *
2256 SKB_TRUESIZE(ETH_FRAME_LEN));
2257 break;
2258 case NDTPA_QUEUE_LENBYTES:
2259 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2260 nla_get_u32(tbp[i]));
2261 break;
2262 case NDTPA_PROXY_QLEN:
2263 NEIGH_VAR_SET(p, PROXY_QLEN,
2264 nla_get_u32(tbp[i]));
2265 break;
2266 case NDTPA_APP_PROBES:
2267 NEIGH_VAR_SET(p, APP_PROBES,
2268 nla_get_u32(tbp[i]));
2269 break;
2270 case NDTPA_UCAST_PROBES:
2271 NEIGH_VAR_SET(p, UCAST_PROBES,
2272 nla_get_u32(tbp[i]));
2273 break;
2274 case NDTPA_MCAST_PROBES:
2275 NEIGH_VAR_SET(p, MCAST_PROBES,
2276 nla_get_u32(tbp[i]));
2277 break;
2278 case NDTPA_MCAST_REPROBES:
2279 NEIGH_VAR_SET(p, MCAST_REPROBES,
2280 nla_get_u32(tbp[i]));
2281 break;
2282 case NDTPA_BASE_REACHABLE_TIME:
2283 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2284 nla_get_msecs(tbp[i]));
				/* update reachable_time as well; otherwise the
				 * change only takes effect after neigh_periodic_work
				 * next recomputes it (which can take multiple minutes)
				 */
2289 p->reachable_time =
2290 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2291 break;
2292 case NDTPA_GC_STALETIME:
2293 NEIGH_VAR_SET(p, GC_STALETIME,
2294 nla_get_msecs(tbp[i]));
2295 break;
2296 case NDTPA_DELAY_PROBE_TIME:
2297 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2298 nla_get_msecs(tbp[i]));
2299 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2300 break;
2301 case NDTPA_RETRANS_TIME:
2302 NEIGH_VAR_SET(p, RETRANS_TIME,
2303 nla_get_msecs(tbp[i]));
2304 break;
2305 case NDTPA_ANYCAST_DELAY:
2306 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2307 nla_get_msecs(tbp[i]));
2308 break;
2309 case NDTPA_PROXY_DELAY:
2310 NEIGH_VAR_SET(p, PROXY_DELAY,
2311 nla_get_msecs(tbp[i]));
2312 break;
2313 case NDTPA_LOCKTIME:
2314 NEIGH_VAR_SET(p, LOCKTIME,
2315 nla_get_msecs(tbp[i]));
2316 break;
2317 }
2318 }
2319 }
2320
2321 err = -ENOENT;
2322 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2323 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2324 !net_eq(net, &init_net))
2325 goto errout_tbl_lock;
2326
2327 if (tb[NDTA_THRESH1])
2328 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2329
2330 if (tb[NDTA_THRESH2])
2331 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2332
2333 if (tb[NDTA_THRESH3])
2334 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2335
2336 if (tb[NDTA_GC_INTERVAL])
2337 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2338
2339 err = 0;
2340
2341errout_tbl_lock:
2342 write_unlock_bh(&tbl->lock);
2343errout:
2344 return err;
2345}
2346
2347static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2348 struct netlink_ext_ack *extack)
2349{
2350 struct ndtmsg *ndtm;
2351
2352 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2353 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2354 return -EINVAL;
2355 }
2356
2357 ndtm = nlmsg_data(nlh);
2358 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2359 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2360 return -EINVAL;
2361 }
2362
2363 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2364 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2365 return -EINVAL;
2366 }
2367
2368 return 0;
2369}
2370
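/* Dump every table followed by its per-device parms entries; cb->args[0]
 * and cb->args[1] hold the table index and parms index where a previous
 * partial dump left off.
 */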
2371static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2372{
2373 const struct nlmsghdr *nlh = cb->nlh;
2374 struct net *net = sock_net(skb->sk);
2375 int family, tidx, nidx = 0;
2376 int tbl_skip = cb->args[0];
2377 int neigh_skip = cb->args[1];
2378 struct neigh_table *tbl;
2379
2380 if (cb->strict_check) {
2381 int err = neightbl_valid_dump_info(nlh, cb->extack);
2382
2383 if (err < 0)
2384 return err;
2385 }
2386
2387 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2388
2389 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2390 struct neigh_parms *p;
2391
2392 tbl = neigh_tables[tidx];
2393 if (!tbl)
2394 continue;
2395
2396 if (tidx < tbl_skip || (family && tbl->family != family))
2397 continue;
2398
2399 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2400 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2401 NLM_F_MULTI) < 0)
2402 break;
2403
2404 nidx = 0;
2405 p = list_next_entry(&tbl->parms, list);
2406 list_for_each_entry_from(p, &tbl->parms_list, list) {
2407 if (!net_eq(neigh_parms_net(p), net))
2408 continue;
2409
2410 if (nidx < neigh_skip)
2411 goto next;
2412
2413 if (neightbl_fill_param_info(skb, tbl, p,
2414 NETLINK_CB(cb->skb).portid,
2415 nlh->nlmsg_seq,
2416 RTM_NEWNEIGHTBL,
2417 NLM_F_MULTI) < 0)
2418 goto out;
2419 next:
2420 nidx++;
2421 }
2422
2423 neigh_skip = 0;
2424 }
2425out:
2426 cb->args[0] = tidx;
2427 cb->args[1] = nidx;
2428
2429 return skb->len;
2430}
2431
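/* Fill one RTM_NEWNEIGH message for a neighbour: the link-layer address
 * (when NUD_VALID) and the cacheinfo timestamps are sampled under
 * neigh->lock.
 */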
2432static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2433 u32 pid, u32 seq, int type, unsigned int flags)
2434{
2435 unsigned long now = jiffies;
2436 struct nda_cacheinfo ci;
2437 struct nlmsghdr *nlh;
2438 struct ndmsg *ndm;
2439
2440 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2441 if (nlh == NULL)
2442 return -EMSGSIZE;
2443
2444 ndm = nlmsg_data(nlh);
2445 ndm->ndm_family = neigh->ops->family;
2446 ndm->ndm_pad1 = 0;
2447 ndm->ndm_pad2 = 0;
2448 ndm->ndm_flags = neigh->flags;
2449 ndm->ndm_type = neigh->type;
2450 ndm->ndm_ifindex = neigh->dev->ifindex;
2451
2452 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2453 goto nla_put_failure;
2454
2455 read_lock_bh(&neigh->lock);
2456 ndm->ndm_state = neigh->nud_state;
2457 if (neigh->nud_state & NUD_VALID) {
2458 char haddr[MAX_ADDR_LEN];
2459
2460 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2461 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2462 read_unlock_bh(&neigh->lock);
2463 goto nla_put_failure;
2464 }
2465 }
2466
2467 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2468 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2469 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2470 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2471 read_unlock_bh(&neigh->lock);
2472
2473 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2474 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2475 goto nla_put_failure;
2476
2477 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2478 goto nla_put_failure;
2479
2480 nlmsg_end(skb, nlh);
2481 return 0;
2482
2483nla_put_failure:
2484 nlmsg_cancel(skb, nlh);
2485 return -EMSGSIZE;
2486}
2487
2488static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2489 u32 pid, u32 seq, int type, unsigned int flags,
2490 struct neigh_table *tbl)
2491{
2492 struct nlmsghdr *nlh;
2493 struct ndmsg *ndm;
2494
2495 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2496 if (nlh == NULL)
2497 return -EMSGSIZE;
2498
2499 ndm = nlmsg_data(nlh);
2500 ndm->ndm_family = tbl->family;
2501 ndm->ndm_pad1 = 0;
2502 ndm->ndm_pad2 = 0;
2503 ndm->ndm_flags = pn->flags | NTF_PROXY;
2504 ndm->ndm_type = RTN_UNICAST;
2505 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2506 ndm->ndm_state = NUD_NONE;
2507
2508 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2509 goto nla_put_failure;
2510
2511 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2512 goto nla_put_failure;
2513
2514 nlmsg_end(skb, nlh);
2515 return 0;
2516
2517nla_put_failure:
2518 nlmsg_cancel(skb, nlh);
2519 return -EMSGSIZE;
2520}
2521
2522static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2523{
2524 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2525 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2526}
2527
2528static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2529{
2530 struct net_device *master;
2531
2532 if (!master_idx)
2533 return false;
2534
2535 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2536 if (!master || master->ifindex != master_idx)
2537 return true;
2538
2539 return false;
2540}
2541
2542static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2543{
2544 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2545 return true;
2546
2547 return false;
2548}
2549
2550struct neigh_dump_filter {
2551 int master_idx;
2552 int dev_idx;
2553};
2554
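/* Walk one table's hash under RCU, emitting an RTM_NEWNEIGH message per
 * entry that matches the namespace and device/master filters;
 * cb->args[1]/[2] record the bucket and chain position for resume.
 */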
2555static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2556 struct netlink_callback *cb,
2557 struct neigh_dump_filter *filter)
2558{
2559 struct net *net = sock_net(skb->sk);
2560 struct neighbour *n;
2561 int rc, h, s_h = cb->args[1];
2562 int idx, s_idx = idx = cb->args[2];
2563 struct neigh_hash_table *nht;
2564 unsigned int flags = NLM_F_MULTI;
2565
2566 if (filter->dev_idx || filter->master_idx)
2567 flags |= NLM_F_DUMP_FILTERED;
2568
2569 rcu_read_lock_bh();
2570 nht = rcu_dereference_bh(tbl->nht);
2571
2572 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2573 if (h > s_h)
2574 s_idx = 0;
2575 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2576 n != NULL;
2577 n = rcu_dereference_bh(n->next)) {
2578 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2579 goto next;
2580 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2581 neigh_master_filtered(n->dev, filter->master_idx))
2582 goto next;
2583 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2584 cb->nlh->nlmsg_seq,
2585 RTM_NEWNEIGH,
2586 flags) < 0) {
2587 rc = -1;
2588 goto out;
2589 }
2590next:
2591 idx++;
2592 }
2593 }
2594 rc = skb->len;
2595out:
2596 rcu_read_unlock_bh();
2597 cb->args[1] = h;
2598 cb->args[2] = idx;
2599 return rc;
2600}
2601
2602static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2603 struct netlink_callback *cb,
2604 struct neigh_dump_filter *filter)
2605{
2606 struct pneigh_entry *n;
2607 struct net *net = sock_net(skb->sk);
2608 int rc, h, s_h = cb->args[3];
2609 int idx, s_idx = idx = cb->args[4];
2610 unsigned int flags = NLM_F_MULTI;
2611
2612 if (filter->dev_idx || filter->master_idx)
2613 flags |= NLM_F_DUMP_FILTERED;
2614
2615 read_lock_bh(&tbl->lock);
2616
2617 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2618 if (h > s_h)
2619 s_idx = 0;
2620 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2621 if (idx < s_idx || pneigh_net(n) != net)
2622 goto next;
2623 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2624 neigh_master_filtered(n->dev, filter->master_idx))
2625 goto next;
2626 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2627 cb->nlh->nlmsg_seq,
2628 RTM_NEWNEIGH, flags, tbl) < 0) {
2629 read_unlock_bh(&tbl->lock);
2630 rc = -1;
2631 goto out;
2632 }
2633 next:
2634 idx++;
2635 }
2636 }
2637
2638 read_unlock_bh(&tbl->lock);
2639 rc = skb->len;
2640out:
2641 cb->args[3] = h;
2642 cb->args[4] = idx;
	return rc;
}
2646
2647static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2648 bool strict_check,
2649 struct neigh_dump_filter *filter,
2650 struct netlink_ext_ack *extack)
2651{
2652 struct nlattr *tb[NDA_MAX + 1];
2653 int err, i;
2654
2655 if (strict_check) {
2656 struct ndmsg *ndm;
2657
2658 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2659 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2660 return -EINVAL;
2661 }
2662
2663 ndm = nlmsg_data(nlh);
2664 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2665 ndm->ndm_state || ndm->ndm_type) {
2666 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2667 return -EINVAL;
2668 }
2669
2670 if (ndm->ndm_flags & ~NTF_PROXY) {
2671 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2672 return -EINVAL;
2673 }
2674
2675 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2676 tb, NDA_MAX, nda_policy,
2677 extack);
2678 } else {
2679 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2680 NDA_MAX, nda_policy, extack);
2681 }
2682 if (err < 0)
2683 return err;
2684
2685 for (i = 0; i <= NDA_MAX; ++i) {
2686 if (!tb[i])
2687 continue;
2688
2689 /* all new attributes should require strict_check */
2690 switch (i) {
2691 case NDA_IFINDEX:
2692 filter->dev_idx = nla_get_u32(tb[i]);
2693 break;
2694 case NDA_MASTER:
2695 filter->master_idx = nla_get_u32(tb[i]);
2696 break;
2697 default:
2698 if (strict_check) {
2699 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2700 return -EINVAL;
2701 }
2702 }
2703 }
2704
2705 return 0;
2706}
2707
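/* RTM_GETNEIGH dump handler: iterate all tables of the requested family,
 * dumping proxy entries instead when the request header has
 * ndm_flags == NTF_PROXY.
 */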
2708static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2709{
2710 const struct nlmsghdr *nlh = cb->nlh;
2711 struct neigh_dump_filter filter = {};
2712 struct neigh_table *tbl;
2713 int t, family, s_t;
2714 int proxy = 0;
2715 int err;
2716
2717 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2718
	/* check for full ndmsg structure presence; the family member is
	 * at the same offset in both structures
	 */
2722 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2723 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2724 proxy = 1;
2725
2726 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2727 if (err < 0 && cb->strict_check)
2728 return err;
2729
2730 s_t = cb->args[0];
2731
2732 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2733 tbl = neigh_tables[t];
2734
2735 if (!tbl)
2736 continue;
2737 if (t < s_t || (family && tbl->family != family))
2738 continue;
2739 if (t > s_t)
2740 memset(&cb->args[1], 0, sizeof(cb->args) -
2741 sizeof(cb->args[0]));
2742 if (proxy)
2743 err = pneigh_dump_table(tbl, skb, cb, &filter);
2744 else
2745 err = neigh_dump_table(tbl, skb, cb, &filter);
2746 if (err < 0)
2747 break;
2748 }
2749
2750 cb->args[0] = t;
2751 return skb->len;
2752}
2753
2754static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2755 struct neigh_table **tbl,
2756 void **dst, int *dev_idx, u8 *ndm_flags,
2757 struct netlink_ext_ack *extack)
2758{
2759 struct nlattr *tb[NDA_MAX + 1];
2760 struct ndmsg *ndm;
2761 int err, i;
2762
2763 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2764 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2765 return -EINVAL;
2766 }
2767
2768 ndm = nlmsg_data(nlh);
2769 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2770 ndm->ndm_type) {
2771 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2772 return -EINVAL;
2773 }
2774
2775 if (ndm->ndm_flags & ~NTF_PROXY) {
2776 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2777 return -EINVAL;
2778 }
2779
2780 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2781 NDA_MAX, nda_policy, extack);
2782 if (err < 0)
2783 return err;
2784
2785 *ndm_flags = ndm->ndm_flags;
2786 *dev_idx = ndm->ndm_ifindex;
2787 *tbl = neigh_find_table(ndm->ndm_family);
2788 if (*tbl == NULL) {
2789 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2790 return -EAFNOSUPPORT;
2791 }
2792
2793 for (i = 0; i <= NDA_MAX; ++i) {
2794 if (!tb[i])
2795 continue;
2796
2797 switch (i) {
2798 case NDA_DST:
2799 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2800 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2801 return -EINVAL;
2802 }
2803 *dst = nla_data(tb[i]);
2804 break;
2805 default:
2806 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2807 return -EINVAL;
2808 }
2809 }
2810
2811 return 0;
2812}
2813
2814static inline size_t neigh_nlmsg_size(void)
2815{
2816 return NLMSG_ALIGN(sizeof(struct ndmsg))
2817 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2818 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2819 + nla_total_size(sizeof(struct nda_cacheinfo))
2820 + nla_total_size(4) /* NDA_PROBES */
2821 + nla_total_size(1); /* NDA_PROTOCOL */
2822}
2823
2824static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2825 u32 pid, u32 seq)
2826{
2827 struct sk_buff *skb;
2828 int err = 0;
2829
2830 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2831 if (!skb)
2832 return -ENOBUFS;
2833
2834 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2835 if (err) {
2836 kfree_skb(skb);
2837 goto errout;
2838 }
2839
2840 err = rtnl_unicast(skb, net, pid);
2841errout:
2842 return err;
2843}
2844
2845static inline size_t pneigh_nlmsg_size(void)
2846{
2847 return NLMSG_ALIGN(sizeof(struct ndmsg))
2848 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2849 + nla_total_size(1); /* NDA_PROTOCOL */
2850}
2851
2852static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2853 u32 pid, u32 seq, struct neigh_table *tbl)
2854{
2855 struct sk_buff *skb;
2856 int err = 0;
2857
2858 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2859 if (!skb)
2860 return -ENOBUFS;
2861
2862 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2863 if (err) {
2864 kfree_skb(skb);
2865 goto errout;
2866 }
2867
2868 err = rtnl_unicast(skb, net, pid);
2869errout:
2870 return err;
2871}
2872
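/* RTM_GETNEIGH doit handler: look up a single neighbour (or proxy entry
 * when NTF_PROXY is set) by family, destination and device, and unicast
 * the reply to the requester.
 */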
2873static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2874 struct netlink_ext_ack *extack)
2875{
2876 struct net *net = sock_net(in_skb->sk);
2877 struct net_device *dev = NULL;
2878 struct neigh_table *tbl = NULL;
2879 struct neighbour *neigh;
2880 void *dst = NULL;
2881 u8 ndm_flags = 0;
2882 int dev_idx = 0;
2883 int err;
2884
2885 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2886 extack);
2887 if (err < 0)
2888 return err;
2889
2890 if (dev_idx) {
2891 dev = __dev_get_by_index(net, dev_idx);
2892 if (!dev) {
2893 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2894 return -ENODEV;
2895 }
2896 }
2897
2898 if (!dst) {
2899 NL_SET_ERR_MSG(extack, "Network address not specified");
2900 return -EINVAL;
2901 }
2902
2903 if (ndm_flags & NTF_PROXY) {
2904 struct pneigh_entry *pn;
2905
2906 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2907 if (!pn) {
2908 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2909 return -ENOENT;
2910 }
2911 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2912 nlh->nlmsg_seq, tbl);
2913 }
2914
2915 if (!dev) {
2916 NL_SET_ERR_MSG(extack, "No device specified");
2917 return -EINVAL;
2918 }
2919
2920 neigh = neigh_lookup(tbl, dst, dev);
2921 if (!neigh) {
2922 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2923 return -ENOENT;
2924 }
2925
2926 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2927 nlh->nlmsg_seq);
2928
2929 neigh_release(neigh);
2930
2931 return err;
2932}
2933
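/* Call cb for every entry in the table.  The RCU read side plus
 * tbl->lock held for reading keep the hash table stable, so cb must not
 * sleep or resize the table.
 */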
2934void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2935{
2936 int chain;
2937 struct neigh_hash_table *nht;
2938
2939 rcu_read_lock_bh();
2940 nht = rcu_dereference_bh(tbl->nht);
2941
2942 read_lock(&tbl->lock); /* avoid resizes */
2943 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2944 struct neighbour *n;
2945
2946 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2947 n != NULL;
2948 n = rcu_dereference_bh(n->next))
2949 cb(n, cookie);
2950 }
2951 read_unlock(&tbl->lock);
2952 rcu_read_unlock_bh();
2953}
2954EXPORT_SYMBOL(neigh_for_each);
2955
2956/* The tbl->lock must be held as a writer and BH disabled. */
2957void __neigh_for_each_release(struct neigh_table *tbl,
2958 int (*cb)(struct neighbour *))
2959{
2960 int chain;
2961 struct neigh_hash_table *nht;
2962
2963 nht = rcu_dereference_protected(tbl->nht,
2964 lockdep_is_held(&tbl->lock));
2965 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2966 struct neighbour *n;
2967 struct neighbour __rcu **np;
2968
2969 np = &nht->hash_buckets[chain];
2970 while ((n = rcu_dereference_protected(*np,
2971 lockdep_is_held(&tbl->lock))) != NULL) {
2972 int release;
2973
2974 write_lock(&n->lock);
2975 release = cb(n);
2976 if (release) {
2977 rcu_assign_pointer(*np,
2978 rcu_dereference_protected(n->next,
2979 lockdep_is_held(&tbl->lock)));
2980 neigh_mark_dead(n);
			} else {
				np = &n->next;
			}
2983 write_unlock(&n->lock);
2984 if (release)
2985 neigh_cleanup_and_release(n);
2986 }
2987 }
2988}
2989EXPORT_SYMBOL(__neigh_for_each_release);
2990
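/* Resolve (creating if necessary) a neighbour in the table selected by
 * index and transmit skb through it.  NEIGH_LINK_TABLE skips neighbour
 * resolution and builds the link-layer header directly.
 */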
2991int neigh_xmit(int index, struct net_device *dev,
2992 const void *addr, struct sk_buff *skb)
2993{
2994 int err = -EAFNOSUPPORT;
2995 if (likely(index < NEIGH_NR_TABLES)) {
2996 struct neigh_table *tbl;
2997 struct neighbour *neigh;
2998
2999 tbl = neigh_tables[index];
3000 if (!tbl)
3001 goto out;
3002 rcu_read_lock_bh();
3003 if (index == NEIGH_ARP_TABLE) {
3004 u32 key = *((u32 *)addr);
3005
3006 neigh = __ipv4_neigh_lookup_noref(dev, key);
3007 } else {
3008 neigh = __neigh_lookup_noref(tbl, addr, dev);
3009 }
3010 if (!neigh)
3011 neigh = __neigh_create(tbl, addr, dev, false);
3012 err = PTR_ERR(neigh);
3013 if (IS_ERR(neigh)) {
3014 rcu_read_unlock_bh();
3015 goto out_kfree_skb;
3016 }
3017 err = neigh->output(neigh, skb);
3018 rcu_read_unlock_bh();
	} else if (index == NEIGH_LINK_TABLE) {
3021 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3022 addr, NULL, skb->len);
3023 if (err < 0)
3024 goto out_kfree_skb;
3025 err = dev_queue_xmit(skb);
3026 }
3027out:
3028 return err;
3029out_kfree_skb:
3030 kfree_skb(skb);
3031 goto out;
3032}
3033EXPORT_SYMBOL(neigh_xmit);
3034
3035#ifdef CONFIG_PROC_FS
3036
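/* seq_file iteration over a neighbour table: ordinary entries are walked
 * bucket by bucket first, then (unless NEIGH_SEQ_NEIGH_ONLY) the proxy
 * entries.
 */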
3037static struct neighbour *neigh_get_first(struct seq_file *seq)
3038{
3039 struct neigh_seq_state *state = seq->private;
3040 struct net *net = seq_file_net(seq);
3041 struct neigh_hash_table *nht = state->nht;
3042 struct neighbour *n = NULL;
3043 int bucket;
3044
3045 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3046 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3047 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3048
3049 while (n) {
3050 if (!net_eq(dev_net(n->dev), net))
3051 goto next;
3052 if (state->neigh_sub_iter) {
3053 loff_t fakep = 0;
3054 void *v;
3055
3056 v = state->neigh_sub_iter(state, n, &fakep);
3057 if (!v)
3058 goto next;
3059 }
3060 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3061 break;
3062 if (n->nud_state & ~NUD_NOARP)
3063 break;
3064next:
3065 n = rcu_dereference_bh(n->next);
3066 }
3067
3068 if (n)
3069 break;
3070 }
3071 state->bucket = bucket;
3072
3073 return n;
3074}
3075
3076static struct neighbour *neigh_get_next(struct seq_file *seq,
3077 struct neighbour *n,
3078 loff_t *pos)
3079{
3080 struct neigh_seq_state *state = seq->private;
3081 struct net *net = seq_file_net(seq);
3082 struct neigh_hash_table *nht = state->nht;
3083
3084 if (state->neigh_sub_iter) {
3085 void *v = state->neigh_sub_iter(state, n, pos);
3086 if (v)
3087 return n;
3088 }
3089 n = rcu_dereference_bh(n->next);
3090
3091 while (1) {
3092 while (n) {
3093 if (!net_eq(dev_net(n->dev), net))
3094 goto next;
3095 if (state->neigh_sub_iter) {
3096 void *v = state->neigh_sub_iter(state, n, pos);
3097 if (v)
3098 return n;
3099 goto next;
3100 }
3101 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3102 break;
3103
3104 if (n->nud_state & ~NUD_NOARP)
3105 break;
3106next:
3107 n = rcu_dereference_bh(n->next);
3108 }
3109
3110 if (n)
3111 break;
3112
3113 if (++state->bucket >= (1 << nht->hash_shift))
3114 break;
3115
3116 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3117 }
3118
3119 if (n && pos)
3120 --(*pos);
3121 return n;
3122}
3123
3124static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3125{
3126 struct neighbour *n = neigh_get_first(seq);
3127
3128 if (n) {
3129 --(*pos);
3130 while (*pos) {
3131 n = neigh_get_next(seq, n, pos);
3132 if (!n)
3133 break;
3134 }
3135 }
3136 return *pos ? NULL : n;
3137}
3138
3139static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3140{
3141 struct neigh_seq_state *state = seq->private;
3142 struct net *net = seq_file_net(seq);
3143 struct neigh_table *tbl = state->tbl;
3144 struct pneigh_entry *pn = NULL;
3145 int bucket;
3146
3147 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3148 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3149 pn = tbl->phash_buckets[bucket];
3150 while (pn && !net_eq(pneigh_net(pn), net))
3151 pn = pn->next;
3152 if (pn)
3153 break;
3154 }
3155 state->bucket = bucket;
3156
3157 return pn;
3158}
3159
3160static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3161 struct pneigh_entry *pn,
3162 loff_t *pos)
3163{
3164 struct neigh_seq_state *state = seq->private;
3165 struct net *net = seq_file_net(seq);
3166 struct neigh_table *tbl = state->tbl;
3167
3168 do {
3169 pn = pn->next;
3170 } while (pn && !net_eq(pneigh_net(pn), net));
3171
3172 while (!pn) {
3173 if (++state->bucket > PNEIGH_HASHMASK)
3174 break;
3175 pn = tbl->phash_buckets[state->bucket];
3176 while (pn && !net_eq(pneigh_net(pn), net))
3177 pn = pn->next;
3178 if (pn)
3179 break;
3180 }
3181
3182 if (pn && pos)
3183 --(*pos);
3184
3185 return pn;
3186}
3187
3188static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3189{
3190 struct pneigh_entry *pn = pneigh_get_first(seq);
3191
3192 if (pn) {
3193 --(*pos);
3194 while (*pos) {
3195 pn = pneigh_get_next(seq, pn, pos);
3196 if (!pn)
3197 break;
3198 }
3199 }
3200 return *pos ? NULL : pn;
3201}
3202
3203static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3204{
3205 struct neigh_seq_state *state = seq->private;
3206 void *rc;
3207 loff_t idxpos = *pos;
3208
3209 rc = neigh_get_idx(seq, &idxpos);
3210 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3211 rc = pneigh_get_idx(seq, &idxpos);
3212
3213 return rc;
3214}
3215
3216void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3217 __acquires(tbl->lock)
3218 __acquires(rcu_bh)
3219{
3220 struct neigh_seq_state *state = seq->private;
3221
3222 state->tbl = tbl;
3223 state->bucket = 0;
3224 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3225
3226 rcu_read_lock_bh();
3227 state->nht = rcu_dereference_bh(tbl->nht);
3228 read_lock(&tbl->lock);
3229
3230 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3231}
3232EXPORT_SYMBOL(neigh_seq_start);
3233
3234void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3235{
3236 struct neigh_seq_state *state;
3237 void *rc;
3238
3239 if (v == SEQ_START_TOKEN) {
3240 rc = neigh_get_first(seq);
3241 goto out;
3242 }
3243
3244 state = seq->private;
3245 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3246 rc = neigh_get_next(seq, v, NULL);
3247 if (rc)
3248 goto out;
3249 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3250 rc = pneigh_get_first(seq);
3251 } else {
3252 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3253 rc = pneigh_get_next(seq, v, NULL);
3254 }
3255out:
3256 ++(*pos);
3257 return rc;
3258}
3259EXPORT_SYMBOL(neigh_seq_next);
3260
3261void neigh_seq_stop(struct seq_file *seq, void *v)
3262 __releases(tbl->lock)
3263 __releases(rcu_bh)
3264{
3265 struct neigh_seq_state *state = seq->private;
3266 struct neigh_table *tbl = state->tbl;
3267
3268 read_unlock(&tbl->lock);
3269 rcu_read_unlock_bh();
3270}
3271EXPORT_SYMBOL(neigh_seq_stop);
3272
3273/* statistics via seq_file */
3274
3275static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3276{
3277 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3278 int cpu;
3279
3280 if (*pos == 0)
3281 return SEQ_START_TOKEN;
3282
3283 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3284 if (!cpu_possible(cpu))
3285 continue;
3286 *pos = cpu+1;
3287 return per_cpu_ptr(tbl->stats, cpu);
3288 }
3289 return NULL;
3290}
3291
3292static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3293{
3294 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3295 int cpu;
3296
3297 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3298 if (!cpu_possible(cpu))
3299 continue;
3300 *pos = cpu+1;
3301 return per_cpu_ptr(tbl->stats, cpu);
3302 }
3303 (*pos)++;
3304 return NULL;
3305}
3306
3307static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
3311
3312static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3313{
3314 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3315 struct neigh_statistics *st = v;
3316
3317 if (v == SEQ_START_TOKEN) {
3318 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3319 return 0;
3320 }
3321
3322 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3323 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
3324 atomic_read(&tbl->entries),
3325
3326 st->allocs,
3327 st->destroys,
3328 st->hash_grows,
3329
3330 st->lookups,
3331 st->hits,
3332
3333 st->res_failed,
3334
3335 st->rcv_probes_mcast,
3336 st->rcv_probes_ucast,
3337
3338 st->periodic_gc_runs,
3339 st->forced_gc_runs,
3340 st->unres_discards,
3341 st->table_fulls
3342 );
3343
3344 return 0;
3345}
3346
3347static const struct seq_operations neigh_stat_seq_ops = {
3348 .start = neigh_stat_seq_start,
3349 .next = neigh_stat_seq_next,
3350 .stop = neigh_stat_seq_stop,
3351 .show = neigh_stat_seq_show,
3352};
3353#endif /* CONFIG_PROC_FS */
3354
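/* Build a netlink message for n and multicast it to RTNLGRP_NEIGH;
 * failures are reported to listeners via rtnl_set_sk_err().
 */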
3355static void __neigh_notify(struct neighbour *n, int type, int flags,
3356 u32 pid)
3357{
3358 struct net *net = dev_net(n->dev);
3359 struct sk_buff *skb;
3360 int err = -ENOBUFS;
3361
3362 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3363 if (skb == NULL)
3364 goto errout;
3365
3366 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3367 if (err < 0) {
3368 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3369 WARN_ON(err == -EMSGSIZE);
3370 kfree_skb(skb);
3371 goto errout;
3372 }
3373 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3374 return;
3375errout:
3376 if (err < 0)
3377 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3378}
3379
3380void neigh_app_ns(struct neighbour *n)
3381{
3382 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3383}
3384EXPORT_SYMBOL(neigh_app_ns);
3385
3386#ifdef CONFIG_SYSCTL
3387static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3388
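/* unres_qlen is presented to userspace in packets but stored in bytes;
 * convert with the truesize of a full Ethernet frame on both reads and
 * writes.
 */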
3389static int proc_unres_qlen(struct ctl_table *ctl, int write,
3390 void *buffer, size_t *lenp, loff_t *ppos)
3391{
3392 int size, ret;
3393 struct ctl_table tmp = *ctl;
3394
3395 tmp.extra1 = SYSCTL_ZERO;
3396 tmp.extra2 = &unres_qlen_max;
3397 tmp.data = &size;
3398
3399 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3400 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3401
3402 if (write && !ret)
3403 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3404 return ret;
3405}
3406
3407static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3408 int family)
3409{
3410 switch (family) {
3411 case AF_INET:
3412 return __in_dev_arp_parms_get_rcu(dev);
3413 case AF_INET6:
3414 return __in6_dev_nd_parms_get_rcu(dev);
3415 }
3416 return NULL;
3417}
3418
3419static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3420 int index)
3421{
3422 struct net_device *dev;
3423 int family = neigh_parms_family(p);
3424
3425 rcu_read_lock();
3426 for_each_netdev_rcu(net, dev) {
3427 struct neigh_parms *dst_p =
3428 neigh_get_dev_parms_rcu(dev, family);
3429
3430 if (dst_p && !test_bit(index, dst_p->data_state))
3431 dst_p->data[index] = p->data[index];
3432 }
3433 rcu_read_unlock();
3434}
3435
3436static void neigh_proc_update(struct ctl_table *ctl, int write)
3437{
3438 struct net_device *dev = ctl->extra1;
3439 struct neigh_parms *p = ctl->extra2;
3440 struct net *net = neigh_parms_net(p);
3441 int index = (int *) ctl->data - p->data;
3442
3443 if (!write)
3444 return;
3445
3446 set_bit(index, p->data_state);
3447 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3448 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3449 if (!dev) /* NULL dev means this is default value */
3450 neigh_copy_dflt_parms(net, p, index);
3451}
3452
3453static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3454 void *buffer, size_t *lenp,
3455 loff_t *ppos)
3456{
3457 struct ctl_table tmp = *ctl;
3458 int ret;
3459
3460 tmp.extra1 = SYSCTL_ZERO;
3461 tmp.extra2 = SYSCTL_INT_MAX;
3462
3463 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3464 neigh_proc_update(ctl, write);
3465 return ret;
3466}
3467
3468int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3469 size_t *lenp, loff_t *ppos)
3470{
3471 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3472
3473 neigh_proc_update(ctl, write);
3474 return ret;
3475}
3476EXPORT_SYMBOL(neigh_proc_dointvec);
3477
3478int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3479 size_t *lenp, loff_t *ppos)
3480{
3481 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3482
3483 neigh_proc_update(ctl, write);
3484 return ret;
3485}
3486EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3487
3488static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3489 void *buffer, size_t *lenp,
3490 loff_t *ppos)
3491{
3492 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3493
3494 neigh_proc_update(ctl, write);
3495 return ret;
3496}
3497
3498int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3499 void *buffer, size_t *lenp, loff_t *ppos)
3500{
3501 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3502
3503 neigh_proc_update(ctl, write);
3504 return ret;
3505}
3506EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3507
3508static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3509 void *buffer, size_t *lenp,
3510 loff_t *ppos)
3511{
3512 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3513
3514 neigh_proc_update(ctl, write);
3515 return ret;
3516}
3517
3518static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3519 void *buffer, size_t *lenp,
3520 loff_t *ppos)
3521{
3522 struct neigh_parms *p = ctl->extra2;
3523 int ret;
3524
3525 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3526 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3527 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3528 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3529 else
3530 ret = -1;
3531
3532 if (write && ret == 0) {
3533 /* update reachable_time as well, otherwise, the change will
3534 * only be effective after the next time neigh_periodic_work
3535 * decides to recompute it
3536 */
3537 p->reachable_time =
3538 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3539 }
3540 return ret;
3541}
3542
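/* NEIGH_PARMS_DATA_OFFSET() computes the byte offset of data[index]
 * within struct neigh_parms (an open-coded offsetof());
 * neigh_sysctl_register() later rebases it onto a real parms instance by
 * adding the instance pointer.
 */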
3543#define NEIGH_PARMS_DATA_OFFSET(index) \
3544 (&((struct neigh_parms *) 0)->data[index])
3545
3546#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3547 [NEIGH_VAR_ ## attr] = { \
3548 .procname = name, \
3549 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3550 .maxlen = sizeof(int), \
3551 .mode = mval, \
3552 .proc_handler = proc, \
3553 }
3554
3555#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3556 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3557
3558#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3559 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3560
3561#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3562 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3563
3564#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3565 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3566
3567#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3568 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3569
3570static struct neigh_sysctl_table {
3571 struct ctl_table_header *sysctl_header;
3572 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3573} neigh_sysctl_template __read_mostly = {
3574 .neigh_vars = {
3575 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3576 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3577 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3578 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3579 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3580 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3581 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3582 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3583 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3584 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3585 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3586 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3587 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3588 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3589 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3590 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3591 [NEIGH_VAR_GC_INTERVAL] = {
3592 .procname = "gc_interval",
3593 .maxlen = sizeof(int),
3594 .mode = 0644,
3595 .proc_handler = proc_dointvec_jiffies,
3596 },
3597 [NEIGH_VAR_GC_THRESH1] = {
3598 .procname = "gc_thresh1",
3599 .maxlen = sizeof(int),
3600 .mode = 0644,
3601 .extra1 = SYSCTL_ZERO,
3602 .extra2 = SYSCTL_INT_MAX,
3603 .proc_handler = proc_dointvec_minmax,
3604 },
3605 [NEIGH_VAR_GC_THRESH2] = {
3606 .procname = "gc_thresh2",
3607 .maxlen = sizeof(int),
3608 .mode = 0644,
3609 .extra1 = SYSCTL_ZERO,
3610 .extra2 = SYSCTL_INT_MAX,
3611 .proc_handler = proc_dointvec_minmax,
3612 },
3613 [NEIGH_VAR_GC_THRESH3] = {
3614 .procname = "gc_thresh3",
3615 .maxlen = sizeof(int),
3616 .mode = 0644,
3617 .extra1 = SYSCTL_ZERO,
3618 .extra2 = SYSCTL_INT_MAX,
3619 .proc_handler = proc_dointvec_minmax,
3620 },
3621 {},
3622 },
3623};
3624
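/* Clone the sysctl template for one neigh_parms instance and register it
 * under net/<ipv4|ipv6>/neigh/<device|default>; the gc_* entries are
 * only exposed in the per-table "default" directory.
 */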
3625int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3626 proc_handler *handler)
3627{
3628 int i;
3629 struct neigh_sysctl_table *t;
3630 const char *dev_name_source;
	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3632 char *p_name;
3633
3634 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3635 if (!t)
3636 goto err;
3637
3638 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3639 t->neigh_vars[i].data += (long) p;
3640 t->neigh_vars[i].extra1 = dev;
3641 t->neigh_vars[i].extra2 = p;
3642 }
3643
3644 if (dev) {
3645 dev_name_source = dev->name;
3646 /* Terminate the table early */
3647 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3648 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3649 } else {
3650 struct neigh_table *tbl = p->tbl;
3651 dev_name_source = "default";
3652 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3653 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3654 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3655 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3656 }
3657
3658 if (handler) {
3659 /* RetransTime */
3660 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3661 /* ReachableTime */
3662 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
3664 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3665 /* ReachableTime (in milliseconds) */
3666 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3667 } else {
		/* These handlers update p->reachable_time after
		 * base_reachable_time(_ms) is set, so the new interval takes
		 * effect at the next neighbour update instead of waiting for
		 * neigh_periodic_work to recompute it (which can take multiple
		 * minutes).  Any handler that replaces them should do the same.
		 */
3674 /* ReachableTime */
3675 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3676 neigh_proc_base_reachable_time;
3677 /* ReachableTime (in milliseconds) */
3678 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3679 neigh_proc_base_reachable_time;
3680 }
3681
3682 /* Don't export sysctls to unprivileged users */
3683 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3684 t->neigh_vars[0].procname = NULL;
3685
3686 switch (neigh_parms_family(p)) {
3687 case AF_INET:
3688 p_name = "ipv4";
3689 break;
3690 case AF_INET6:
3691 p_name = "ipv6";
3692 break;
3693 default:
3694 BUG();
3695 }
3696
3697 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3698 p_name, dev_name_source);
3699 t->sysctl_header =
3700 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3701 if (!t->sysctl_header)
3702 goto free;
3703
3704 p->sysctl_table = t;
3705 return 0;
3706
3707free:
3708 kfree(t);
3709err:
3710 return -ENOBUFS;
3711}
3712EXPORT_SYMBOL(neigh_sysctl_register);
3713
3714void neigh_sysctl_unregister(struct neigh_parms *p)
3715{
3716 if (p->sysctl_table) {
3717 struct neigh_sysctl_table *t = p->sysctl_table;
3718 p->sysctl_table = NULL;
3719 unregister_net_sysctl_table(t->sysctl_header);
3720 kfree(t);
3721 }
3722}
3723EXPORT_SYMBOL(neigh_sysctl_unregister);
3724
3725#endif /* CONFIG_SYSCTL */
3726
3727static int __init neigh_init(void)
3728{
3729 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3730 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3731 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3732
3733 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3734 0);
3735 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3736
3737 return 0;
3738}
3739
3740subsys_initcall(neigh_init);
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/slab.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/socket.h>
25#include <linux/netdevice.h>
26#include <linux/proc_fs.h>
27#ifdef CONFIG_SYSCTL
28#include <linux/sysctl.h>
29#endif
30#include <linux/times.h>
31#include <net/net_namespace.h>
32#include <net/neighbour.h>
33#include <net/dst.h>
34#include <net/sock.h>
35#include <net/netevent.h>
36#include <net/netlink.h>
37#include <linux/rtnetlink.h>
38#include <linux/random.h>
39#include <linux/string.h>
40#include <linux/log2.h>
41#include <linux/inetdevice.h>
42#include <net/addrconf.h>
43
44#define DEBUG
45#define NEIGH_DEBUG 1
46#define neigh_dbg(level, fmt, ...) \
47do { \
48 if (level <= NEIGH_DEBUG) \
49 pr_debug(fmt, ##__VA_ARGS__); \
50} while (0)
51
52#define PNEIGH_HASHMASK 0xF
53
54static void neigh_timer_handler(unsigned long arg);
55static void __neigh_notify(struct neighbour *n, int type, int flags);
56static void neigh_update_notify(struct neighbour *neigh);
57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
58
59#ifdef CONFIG_PROC_FS
60static const struct file_operations neigh_stat_seq_fops;
61#endif
62
63/*
64 Neighbour hash table buckets are protected with rwlock tbl->lock.
65
66 - All the scans/updates to hash buckets MUST be made under this lock.
67 - NOTHING clever should be made under this lock: no callbacks
68 to protocol backends, no attempts to send something to network.
69 It will result in deadlocks, if backend/driver wants to use neighbour
70 cache.
71 - If the entry requires some non-trivial actions, increase
72 its reference count and release table lock.
73
74 Neighbour entries are protected:
75 - with reference count.
76 - with rwlock neigh->lock
77
78 Reference count prevents destruction.
79
80 neigh->lock mainly serializes ll address data and its validity state.
81 However, the same lock is used to protect another entry fields:
82 - timer
83 - resolution queue
84
85 Again, nothing clever shall be made under neigh->lock,
86 the most complicated procedure, which we allow is dev->hard_header.
87 It is supposed, that dev->hard_header is simplistic and does
88 not make callbacks to neighbour tables.
89 */
90
91static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
92{
93 kfree_skb(skb);
94 return -ENETDOWN;
95}
96
97static void neigh_cleanup_and_release(struct neighbour *neigh)
98{
99 if (neigh->parms->neigh_cleanup)
100 neigh->parms->neigh_cleanup(neigh);
101
102 __neigh_notify(neigh, RTM_DELNEIGH, 0);
103 neigh_release(neigh);
104}
105
106/*
107 * It is random distribution in the interval (1/2)*base...(3/2)*base.
108 * It corresponds to default IPv6 settings and is not overridable,
109 * because it is really reasonable choice.
110 */
111
112unsigned long neigh_rand_reach_time(unsigned long base)
113{
114 return base ? (prandom_u32() % base) + (base >> 1) : 0;
115}
116EXPORT_SYMBOL(neigh_rand_reach_time);
117
118
119static int neigh_forced_gc(struct neigh_table *tbl)
120{
121 int shrunk = 0;
122 int i;
123 struct neigh_hash_table *nht;
124
125 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
126
127 write_lock_bh(&tbl->lock);
128 nht = rcu_dereference_protected(tbl->nht,
129 lockdep_is_held(&tbl->lock));
130 for (i = 0; i < (1 << nht->hash_shift); i++) {
131 struct neighbour *n;
132 struct neighbour __rcu **np;
133
134 np = &nht->hash_buckets[i];
135 while ((n = rcu_dereference_protected(*np,
136 lockdep_is_held(&tbl->lock))) != NULL) {
137 /* Neighbour record may be discarded if:
138 * - nobody refers to it.
139 * - it is not permanent
140 */
141 write_lock(&n->lock);
142 if (atomic_read(&n->refcnt) == 1 &&
143 !(n->nud_state & NUD_PERMANENT)) {
144 rcu_assign_pointer(*np,
145 rcu_dereference_protected(n->next,
146 lockdep_is_held(&tbl->lock)));
147 n->dead = 1;
148 shrunk = 1;
149 write_unlock(&n->lock);
150 neigh_cleanup_and_release(n);
151 continue;
152 }
153 write_unlock(&n->lock);
154 np = &n->next;
155 }
156 }
157
158 tbl->last_flush = jiffies;
159
160 write_unlock_bh(&tbl->lock);
161
162 return shrunk;
163}
164
165static void neigh_add_timer(struct neighbour *n, unsigned long when)
166{
167 neigh_hold(n);
168 if (unlikely(mod_timer(&n->timer, when))) {
169 printk("NEIGH: BUG, double timer add, state is %x\n",
170 n->nud_state);
171 dump_stack();
172 }
173}
174
175static int neigh_del_timer(struct neighbour *n)
176{
177 if ((n->nud_state & NUD_IN_TIMER) &&
178 del_timer(&n->timer)) {
179 neigh_release(n);
180 return 1;
181 }
182 return 0;
183}
184
185static void pneigh_queue_purge(struct sk_buff_head *list)
186{
187 struct sk_buff *skb;
188
189 while ((skb = skb_dequeue(list)) != NULL) {
190 dev_put(skb->dev);
191 kfree_skb(skb);
192 }
193}
194
195static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
196{
197 int i;
198 struct neigh_hash_table *nht;
199
200 nht = rcu_dereference_protected(tbl->nht,
201 lockdep_is_held(&tbl->lock));
202
203 for (i = 0; i < (1 << nht->hash_shift); i++) {
204 struct neighbour *n;
205 struct neighbour __rcu **np = &nht->hash_buckets[i];
206
207 while ((n = rcu_dereference_protected(*np,
208 lockdep_is_held(&tbl->lock))) != NULL) {
209 if (dev && n->dev != dev) {
210 np = &n->next;
211 continue;
212 }
213 rcu_assign_pointer(*np,
214 rcu_dereference_protected(n->next,
215 lockdep_is_held(&tbl->lock)));
216 write_lock(&n->lock);
217 neigh_del_timer(n);
218 n->dead = 1;
219
220 if (atomic_read(&n->refcnt) != 1) {
221 /* The most unpleasant situation.
222 We must destroy neighbour entry,
223 but someone still uses it.
224
225 The destroy will be delayed until
226 the last user releases us, but
227 we must kill timers etc. and move
228 it to safe state.
229 */
230 __skb_queue_purge(&n->arp_queue);
231 n->arp_queue_len_bytes = 0;
232 n->output = neigh_blackhole;
233 if (n->nud_state & NUD_VALID)
234 n->nud_state = NUD_NOARP;
235 else
236 n->nud_state = NUD_NONE;
237 neigh_dbg(2, "neigh %p is stray\n", n);
238 }
239 write_unlock(&n->lock);
240 neigh_cleanup_and_release(n);
241 }
242 }
243}
244
245void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
246{
247 write_lock_bh(&tbl->lock);
248 neigh_flush_dev(tbl, dev);
249 write_unlock_bh(&tbl->lock);
250}
251EXPORT_SYMBOL(neigh_changeaddr);
252
253int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
254{
255 write_lock_bh(&tbl->lock);
256 neigh_flush_dev(tbl, dev);
257 pneigh_ifdown(tbl, dev);
258 write_unlock_bh(&tbl->lock);
259
260 del_timer_sync(&tbl->proxy_timer);
261 pneigh_queue_purge(&tbl->proxy_queue);
262 return 0;
263}
264EXPORT_SYMBOL(neigh_ifdown);
265
266static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
267{
268 struct neighbour *n = NULL;
269 unsigned long now = jiffies;
270 int entries;
271
272 entries = atomic_inc_return(&tbl->entries) - 1;
273 if (entries >= tbl->gc_thresh3 ||
274 (entries >= tbl->gc_thresh2 &&
275 time_after(now, tbl->last_flush + 5 * HZ))) {
276 if (!neigh_forced_gc(tbl) &&
277 entries >= tbl->gc_thresh3) {
278 net_info_ratelimited("%s: neighbor table overflow!\n",
279 tbl->id);
280 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
281 goto out_entries;
282 }
283 }
284
285 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
286 if (!n)
287 goto out_entries;
288
289 __skb_queue_head_init(&n->arp_queue);
290 rwlock_init(&n->lock);
291 seqlock_init(&n->ha_lock);
292 n->updated = n->used = now;
293 n->nud_state = NUD_NONE;
294 n->output = neigh_blackhole;
295 seqlock_init(&n->hh.hh_lock);
296 n->parms = neigh_parms_clone(&tbl->parms);
297 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
298
299 NEIGH_CACHE_STAT_INC(tbl, allocs);
300 n->tbl = tbl;
301 atomic_set(&n->refcnt, 1);
302 n->dead = 1;
303out:
304 return n;
305
306out_entries:
307 atomic_dec(&tbl->entries);
308 goto out;
309}
310
311static void neigh_get_hash_rnd(u32 *x)
312{
313 get_random_bytes(x, sizeof(*x));
314 *x |= 1;
315}
316
317static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
318{
319 size_t size = (1 << shift) * sizeof(struct neighbour *);
320 struct neigh_hash_table *ret;
321 struct neighbour __rcu **buckets;
322 int i;
323
324 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
325 if (!ret)
326 return NULL;
327 if (size <= PAGE_SIZE)
328 buckets = kzalloc(size, GFP_ATOMIC);
329 else
330 buckets = (struct neighbour __rcu **)
331 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
332 get_order(size));
333 if (!buckets) {
334 kfree(ret);
335 return NULL;
336 }
337 ret->hash_buckets = buckets;
338 ret->hash_shift = shift;
339 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
340 neigh_get_hash_rnd(&ret->hash_rnd[i]);
341 return ret;
342}
343
344static void neigh_hash_free_rcu(struct rcu_head *head)
345{
346 struct neigh_hash_table *nht = container_of(head,
347 struct neigh_hash_table,
348 rcu);
349 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
350 struct neighbour __rcu **buckets = nht->hash_buckets;
351
352 if (size <= PAGE_SIZE)
353 kfree(buckets);
354 else
355 free_pages((unsigned long)buckets, get_order(size));
356 kfree(nht);
357}
358
359static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
360 unsigned long new_shift)
361{
362 unsigned int i, hash;
363 struct neigh_hash_table *new_nht, *old_nht;
364
365 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
366
367 old_nht = rcu_dereference_protected(tbl->nht,
368 lockdep_is_held(&tbl->lock));
369 new_nht = neigh_hash_alloc(new_shift);
370 if (!new_nht)
371 return old_nht;
372
373 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
374 struct neighbour *n, *next;
375
376 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
377 lockdep_is_held(&tbl->lock));
378 n != NULL;
379 n = next) {
380 hash = tbl->hash(n->primary_key, n->dev,
381 new_nht->hash_rnd);
382
383 hash >>= (32 - new_nht->hash_shift);
384 next = rcu_dereference_protected(n->next,
385 lockdep_is_held(&tbl->lock));
386
387 rcu_assign_pointer(n->next,
388 rcu_dereference_protected(
389 new_nht->hash_buckets[hash],
390 lockdep_is_held(&tbl->lock)));
391 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
392 }
393 }
394
395 rcu_assign_pointer(tbl->nht, new_nht);
396 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
397 return new_nht;
398}
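
/* Example growth sequence: neigh_table_init() starts the table at
 * shift 3 (8 buckets), and __neigh_create() calls this with shift + 1
 * whenever the entry count exceeds the bucket count, so the table is
 * rehashed to 16, 32, 64, ... buckets, keeping the average chain length
 * around one entry.
 */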
399
400struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
401 struct net_device *dev)
402{
403 struct neighbour *n;
404
405 NEIGH_CACHE_STAT_INC(tbl, lookups);
406
407 rcu_read_lock_bh();
408 n = __neigh_lookup_noref(tbl, pkey, dev);
409 if (n) {
410 if (!atomic_inc_not_zero(&n->refcnt))
411 n = NULL;
412 NEIGH_CACHE_STAT_INC(tbl, hits);
413 }
414
415 rcu_read_unlock_bh();
416 return n;
417}
418EXPORT_SYMBOL(neigh_lookup);
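
/* Illustrative usage sketch: the entry comes back with its refcount
 * already taken, and the caller must balance it with neigh_release()
 * (next_hop_ip here is a hypothetical __be32 key):
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &next_hop_ip, dev);
 *	if (n) {
 *		... inspect n->nud_state, n->ha under n->lock ...
 *		neigh_release(n);
 *	}
 */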
419
420struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
421 const void *pkey)
422{
423 struct neighbour *n;
424 int key_len = tbl->key_len;
425 u32 hash_val;
426 struct neigh_hash_table *nht;
427
428 NEIGH_CACHE_STAT_INC(tbl, lookups);
429
430 rcu_read_lock_bh();
431 nht = rcu_dereference_bh(tbl->nht);
432 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
433
434 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
435 n != NULL;
436 n = rcu_dereference_bh(n->next)) {
437 if (!memcmp(n->primary_key, pkey, key_len) &&
438 net_eq(dev_net(n->dev), net)) {
439 if (!atomic_inc_not_zero(&n->refcnt))
440 n = NULL;
441 NEIGH_CACHE_STAT_INC(tbl, hits);
442 break;
443 }
444 }
445
446 rcu_read_unlock_bh();
447 return n;
448}
449EXPORT_SYMBOL(neigh_lookup_nodev);
450
451struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
452 struct net_device *dev, bool want_ref)
453{
454 u32 hash_val;
455 int key_len = tbl->key_len;
456 int error;
457 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
458 struct neigh_hash_table *nht;
459
460 if (!n) {
461 rc = ERR_PTR(-ENOBUFS);
462 goto out;
463 }
464
465 memcpy(n->primary_key, pkey, key_len);
466 n->dev = dev;
467 dev_hold(dev);
468
469 /* Protocol specific setup. */
470 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
471 rc = ERR_PTR(error);
472 goto out_neigh_release;
473 }
474
475 if (dev->netdev_ops->ndo_neigh_construct) {
476 error = dev->netdev_ops->ndo_neigh_construct(n);
477 if (error < 0) {
478 rc = ERR_PTR(error);
479 goto out_neigh_release;
480 }
481 }
482
483 /* Device specific setup. */
484 if (n->parms->neigh_setup &&
485 (error = n->parms->neigh_setup(n)) < 0) {
486 rc = ERR_PTR(error);
487 goto out_neigh_release;
488 }
489
490 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
491
492 write_lock_bh(&tbl->lock);
493 nht = rcu_dereference_protected(tbl->nht,
494 lockdep_is_held(&tbl->lock));
495
496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498
499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500
501 if (n->parms->dead) {
502 rc = ERR_PTR(-EINVAL);
503 goto out_tbl_unlock;
504 }
505
506 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 lockdep_is_held(&tbl->lock));
508 n1 != NULL;
509 n1 = rcu_dereference_protected(n1->next,
510 lockdep_is_held(&tbl->lock))) {
511 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
512 if (want_ref)
513 neigh_hold(n1);
514 rc = n1;
515 goto out_tbl_unlock;
516 }
517 }
518
519 n->dead = 0;
520 if (want_ref)
521 neigh_hold(n);
522 rcu_assign_pointer(n->next,
523 rcu_dereference_protected(nht->hash_buckets[hash_val],
524 lockdep_is_held(&tbl->lock)));
525 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
526 write_unlock_bh(&tbl->lock);
527 neigh_dbg(2, "neigh %p is created\n", n);
528 rc = n;
529out:
530 return rc;
531out_tbl_unlock:
532 write_unlock_bh(&tbl->lock);
533out_neigh_release:
534 neigh_release(n);
535 goto out;
536}
537EXPORT_SYMBOL(__neigh_create);
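
/* Most callers use the neigh_create() inline from <net/neighbour.h>,
 * which is simply __neigh_create(tbl, pkey, dev, true), i.e. "create
 * and take a reference". A sketch, with daddr a hypothetical __be32
 * next-hop key:
 *
 *	struct neighbour *n = neigh_create(&arp_tbl, &daddr, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	...
 *	neigh_release(n);
 */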
538
539static u32 pneigh_hash(const void *pkey, int key_len)
540{
541 u32 hash_val = *(u32 *)(pkey + key_len - 4);
542 hash_val ^= (hash_val >> 16);
543 hash_val ^= hash_val >> 8;
544 hash_val ^= hash_val >> 4;
545 hash_val &= PNEIGH_HASHMASK;
546 return hash_val;
547}
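
/* Worked example: reading the last four key bytes of 192.168.0.1 as the
 * (big-endian) word 0xC0A80001, the fold above proceeds
 *	0xC0A80001 ^ 0x0000C0A8 = 0xC0A8C0A9
 *	0xC0A8C0A9 ^ 0x00C0A8C0 = 0xC0686869
 *	0xC0686869 ^ 0x0C068686 = 0xCC6EEEEF
 * and 0xCC6EEEEF & 0xF selects proxy bucket 15. (On a little-endian
 * host the loaded word differs, but the fold works the same way.)
 */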
548
549static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
550 struct net *net,
551 const void *pkey,
552 int key_len,
553 struct net_device *dev)
554{
555 while (n) {
556 if (!memcmp(n->key, pkey, key_len) &&
557 net_eq(pneigh_net(n), net) &&
558 (n->dev == dev || !n->dev))
559 return n;
560 n = n->next;
561 }
562 return NULL;
563}
564
565struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
566 struct net *net, const void *pkey, struct net_device *dev)
567{
568 int key_len = tbl->key_len;
569 u32 hash_val = pneigh_hash(pkey, key_len);
570
571 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
572 net, pkey, key_len, dev);
573}
574EXPORT_SYMBOL_GPL(__pneigh_lookup);
575
576struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
577 struct net *net, const void *pkey,
578 struct net_device *dev, int creat)
579{
580 struct pneigh_entry *n;
581 int key_len = tbl->key_len;
582 u32 hash_val = pneigh_hash(pkey, key_len);
583
584 read_lock_bh(&tbl->lock);
585 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
586 net, pkey, key_len, dev);
587 read_unlock_bh(&tbl->lock);
588
589 if (n || !creat)
590 goto out;
591
592 ASSERT_RTNL();
593
594 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
595 if (!n)
596 goto out;
597
598 write_pnet(&n->net, net);
599 memcpy(n->key, pkey, key_len);
600 n->dev = dev;
601 if (dev)
602 dev_hold(dev);
603
604 if (tbl->pconstructor && tbl->pconstructor(n)) {
605 if (dev)
606 dev_put(dev);
607 kfree(n);
608 n = NULL;
609 goto out;
610 }
611
612 write_lock_bh(&tbl->lock);
613 n->next = tbl->phash_buckets[hash_val];
614 tbl->phash_buckets[hash_val] = n;
615 write_unlock_bh(&tbl->lock);
616out:
617 return n;
618}
619EXPORT_SYMBOL(pneigh_lookup);
620
621
622int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
623 struct net_device *dev)
624{
625 struct pneigh_entry *n, **np;
626 int key_len = tbl->key_len;
627 u32 hash_val = pneigh_hash(pkey, key_len);
628
629 write_lock_bh(&tbl->lock);
630 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
631 np = &n->next) {
632 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
633 net_eq(pneigh_net(n), net)) {
634 *np = n->next;
635 write_unlock_bh(&tbl->lock);
636 if (tbl->pdestructor)
637 tbl->pdestructor(n);
638 if (n->dev)
639 dev_put(n->dev);
640 kfree(n);
641 return 0;
642 }
643 }
644 write_unlock_bh(&tbl->lock);
645 return -ENOENT;
646}
647
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	/* Run the destructors only after dropping tbl->lock; a
	 * protocol's pdestructor may take locks of its own.
	 */
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}
670
671static void neigh_parms_destroy(struct neigh_parms *parms);
672
673static inline void neigh_parms_put(struct neigh_parms *parms)
674{
675 if (atomic_dec_and_test(&parms->refcnt))
676 neigh_parms_destroy(parms);
677}
678
/*
 * The neighbour must already be unlinked from its table; this runs
 * once the last reference is dropped via neigh_release().
 */
683void neigh_destroy(struct neighbour *neigh)
684{
685 struct net_device *dev = neigh->dev;
686
687 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
688
689 if (!neigh->dead) {
690 pr_warn("Destroying alive neighbour %p\n", neigh);
691 dump_stack();
692 return;
693 }
694
695 if (neigh_del_timer(neigh))
696 pr_warn("Impossible event\n");
697
698 write_lock_bh(&neigh->lock);
699 __skb_queue_purge(&neigh->arp_queue);
700 write_unlock_bh(&neigh->lock);
701 neigh->arp_queue_len_bytes = 0;
702
703 if (dev->netdev_ops->ndo_neigh_destroy)
704 dev->netdev_ops->ndo_neigh_destroy(neigh);
705
706 dev_put(dev);
707 neigh_parms_put(neigh->parms);
708
709 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
710
711 atomic_dec(&neigh->tbl->entries);
712 kfree_rcu(neigh, rcu);
713}
714EXPORT_SYMBOL(neigh_destroy);
715
716/* Neighbour state is suspicious;
717 disable fast path.
718
719 Called with write_locked neigh.
720 */
721static void neigh_suspect(struct neighbour *neigh)
722{
723 neigh_dbg(2, "neigh %p is suspected\n", neigh);
724
725 neigh->output = neigh->ops->output;
726}
727
728/* Neighbour state is OK;
729 enable fast path.
730
731 Called with write_locked neigh.
732 */
733static void neigh_connect(struct neighbour *neigh)
734{
735 neigh_dbg(2, "neigh %p is connected\n", neigh);
736
737 neigh->output = neigh->ops->connected_output;
738}
739
740static void neigh_periodic_work(struct work_struct *work)
741{
742 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
743 struct neighbour *n;
744 struct neighbour __rcu **np;
745 unsigned int i;
746 struct neigh_hash_table *nht;
747
748 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
749
750 write_lock_bh(&tbl->lock);
751 nht = rcu_dereference_protected(tbl->nht,
752 lockdep_is_held(&tbl->lock));
753
754 /*
755 * periodically recompute ReachableTime from random function
756 */
757
758 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
759 struct neigh_parms *p;
760 tbl->last_rand = jiffies;
761 list_for_each_entry(p, &tbl->parms_list, list)
762 p->reachable_time =
763 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
764 }
765
766 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
767 goto out;
768
769 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
770 np = &nht->hash_buckets[i];
771
772 while ((n = rcu_dereference_protected(*np,
773 lockdep_is_held(&tbl->lock))) != NULL) {
774 unsigned int state;
775
776 write_lock(&n->lock);
777
778 state = n->nud_state;
779 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
780 write_unlock(&n->lock);
781 goto next_elt;
782 }
783
784 if (time_before(n->used, n->confirmed))
785 n->used = n->confirmed;
786
787 if (atomic_read(&n->refcnt) == 1 &&
788 (state == NUD_FAILED ||
789 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
790 *np = n->next;
791 n->dead = 1;
792 write_unlock(&n->lock);
793 neigh_cleanup_and_release(n);
794 continue;
795 }
796 write_unlock(&n->lock);
797
798next_elt:
799 np = &n->next;
800 }
801 /*
802 * It's fine to release lock here, even if hash table
803 * grows while we are preempted.
804 */
805 write_unlock_bh(&tbl->lock);
806 cond_resched();
807 write_lock_bh(&tbl->lock);
808 nht = rcu_dereference_protected(tbl->nht,
809 lockdep_is_held(&tbl->lock));
810 }
811out:
812 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
813 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
814 * BASE_REACHABLE_TIME.
815 */
816 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
817 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
818 write_unlock_bh(&tbl->lock);
819}
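
/* Worked example for the recomputation above: with the default
 * BASE_REACHABLE_TIME of 30 seconds, neigh_rand_reach_time() yields a
 * reachable_time uniform in [15s, 45s), i.e. base/2 + U[0, base), and
 * a fresh value is drawn at most once every 300 seconds.
 */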
820
821static __inline__ int neigh_max_probes(struct neighbour *n)
822{
823 struct neigh_parms *p = n->parms;
824 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
825 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
826 NEIGH_VAR(p, MCAST_PROBES));
827}
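
/* Example with common defaults (UCAST_PROBES = 3, APP_PROBES = 0,
 * MCAST_PROBES = 3, MCAST_REPROBES = 0): an entry resolving in
 * NUD_INCOMPLETE may send 3 + 0 + 3 = 6 probes before failing, while
 * one re-verifying in NUD_PROBE sends only 3 + 0 + 0 = 3, since it
 * falls back to multicast only when MCAST_REPROBES is raised.
 */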
828
829static void neigh_invalidate(struct neighbour *neigh)
830 __releases(neigh->lock)
831 __acquires(neigh->lock)
832{
833 struct sk_buff *skb;
834
835 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
836 neigh_dbg(2, "neigh %p is failed\n", neigh);
837 neigh->updated = jiffies;
838
	/* This is a delicate spot: the error_report routine is quite
	   complicated and can even hit this same neighbour entry again,
	   so take care here to avoid a dead loop. --ANK
	 */
844 while (neigh->nud_state == NUD_FAILED &&
845 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
846 write_unlock(&neigh->lock);
847 neigh->ops->error_report(neigh, skb);
848 write_lock(&neigh->lock);
849 }
850 __skb_queue_purge(&neigh->arp_queue);
851 neigh->arp_queue_len_bytes = 0;
852}
853
854static void neigh_probe(struct neighbour *neigh)
855 __releases(neigh->lock)
856{
857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 /* keep skb alive even if arp_queue overflows */
859 if (skb)
860 skb = skb_clone(skb, GFP_ATOMIC);
861 write_unlock(&neigh->lock);
862 neigh->ops->solicit(neigh, skb);
863 atomic_inc(&neigh->probes);
864 kfree_skb(skb);
865}
866
867/* Called when a timer expires for a neighbour entry. */
868
static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
873 unsigned int state;
874 int notify = 0;
875
876 write_lock(&neigh->lock);
877
878 state = neigh->nud_state;
879 now = jiffies;
880 next = now + HZ;
881
882 if (!(state & NUD_IN_TIMER))
883 goto out;
884
885 if (state & NUD_REACHABLE) {
886 if (time_before_eq(now,
887 neigh->confirmed + neigh->parms->reachable_time)) {
888 neigh_dbg(2, "neigh %p is still alive\n", neigh);
889 next = neigh->confirmed + neigh->parms->reachable_time;
890 } else if (time_before_eq(now,
891 neigh->used +
892 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
893 neigh_dbg(2, "neigh %p is delayed\n", neigh);
894 neigh->nud_state = NUD_DELAY;
895 neigh->updated = jiffies;
896 neigh_suspect(neigh);
897 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
898 } else {
899 neigh_dbg(2, "neigh %p is suspected\n", neigh);
900 neigh->nud_state = NUD_STALE;
901 neigh->updated = jiffies;
902 neigh_suspect(neigh);
903 notify = 1;
904 }
905 } else if (state & NUD_DELAY) {
906 if (time_before_eq(now,
907 neigh->confirmed +
908 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
909 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
910 neigh->nud_state = NUD_REACHABLE;
911 neigh->updated = jiffies;
912 neigh_connect(neigh);
913 notify = 1;
914 next = neigh->confirmed + neigh->parms->reachable_time;
915 } else {
916 neigh_dbg(2, "neigh %p is probed\n", neigh);
917 neigh->nud_state = NUD_PROBE;
918 neigh->updated = jiffies;
919 atomic_set(&neigh->probes, 0);
920 notify = 1;
921 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
922 }
923 } else {
924 /* NUD_PROBE|NUD_INCOMPLETE */
925 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
926 }
927
928 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
929 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
930 neigh->nud_state = NUD_FAILED;
931 notify = 1;
932 neigh_invalidate(neigh);
933 goto out;
934 }
935
936 if (neigh->nud_state & NUD_IN_TIMER) {
937 if (time_before(next, jiffies + HZ/2))
938 next = jiffies + HZ/2;
939 if (!mod_timer(&neigh->timer, next))
940 neigh_hold(neigh);
941 }
942 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
943 neigh_probe(neigh);
944 } else {
945out:
946 write_unlock(&neigh->lock);
947 }
948
949 if (notify)
		neigh_update_notify(neigh, 0);
951
952 neigh_release(neigh);
953}
954
955int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
956{
957 int rc;
958 bool immediate_probe = false;
959
960 write_lock_bh(&neigh->lock);
961
962 rc = 0;
963 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
964 goto out_unlock_bh;
965 if (neigh->dead)
966 goto out_dead;
967
968 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
969 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
970 NEIGH_VAR(neigh->parms, APP_PROBES)) {
971 unsigned long next, now = jiffies;
972
973 atomic_set(&neigh->probes,
974 NEIGH_VAR(neigh->parms, UCAST_PROBES));
975 neigh->nud_state = NUD_INCOMPLETE;
976 neigh->updated = now;
977 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
978 HZ/2);
979 neigh_add_timer(neigh, next);
980 immediate_probe = true;
981 } else {
982 neigh->nud_state = NUD_FAILED;
983 neigh->updated = jiffies;
984 write_unlock_bh(&neigh->lock);
985
986 kfree_skb(skb);
987 return 1;
988 }
989 } else if (neigh->nud_state & NUD_STALE) {
990 neigh_dbg(2, "neigh %p is delayed\n", neigh);
991 neigh->nud_state = NUD_DELAY;
992 neigh->updated = jiffies;
993 neigh_add_timer(neigh, jiffies +
994 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
995 }
996
997 if (neigh->nud_state == NUD_INCOMPLETE) {
998 if (skb) {
999 while (neigh->arp_queue_len_bytes + skb->truesize >
1000 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1001 struct sk_buff *buff;
1002
1003 buff = __skb_dequeue(&neigh->arp_queue);
1004 if (!buff)
1005 break;
1006 neigh->arp_queue_len_bytes -= buff->truesize;
1007 kfree_skb(buff);
1008 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1009 }
1010 skb_dst_force(skb);
1011 __skb_queue_tail(&neigh->arp_queue, skb);
1012 neigh->arp_queue_len_bytes += skb->truesize;
1013 }
1014 rc = 1;
1015 }
1016out_unlock_bh:
1017 if (immediate_probe)
1018 neigh_probe(neigh);
1019 else
1020 write_unlock(&neigh->lock);
1021 local_bh_enable();
1022 return rc;
1023
1024out_dead:
1025 if (neigh->nud_state & NUD_STALE)
1026 goto out_unlock_bh;
1027 write_unlock_bh(&neigh->lock);
1028 kfree_skb(skb);
1029 return 1;
1030}
1031EXPORT_SYMBOL(__neigh_event_send);
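
/* Callers normally reach this through the neigh_event_send() inline in
 * <net/neighbour.h>, which short-circuits entries already in
 * NUD_CONNECTED, NUD_DELAY or NUD_PROBE. The contract: a return of 0
 * means "resolved enough, transmit now"; nonzero means the skb was
 * either queued on arp_queue awaiting resolution or freed because
 * resolution already failed (see neigh_resolve_output() below).
 */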
1032
1033static void neigh_update_hhs(struct neighbour *neigh)
1034{
1035 struct hh_cache *hh;
1036 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1037 = NULL;
1038
1039 if (neigh->dev->header_ops)
1040 update = neigh->dev->header_ops->cache_update;
1041
1042 if (update) {
1043 hh = &neigh->hh;
1044 if (hh->hh_len) {
1045 write_seqlock_bh(&hh->hh_lock);
1046 update(hh, neigh->dev, neigh->ha);
1047 write_sequnlock_bh(&hh->hh_lock);
1048 }
1049 }
1050}
1051
1052
1053
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none was supplied.
   -- new    is the new state.
   -- flags:
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
		with a different one.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing
		"connected" lladdr instead of overriding it with a
		different one. It also allows retaining the current
		state when lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
		NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known
		to be a router.
   -- nlmsg_pid is the netlink portid to carry in notifications, or 0.

   Caller MUST hold a reference count on the entry.
 */
1074
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
1077{
1078 u8 old;
1079 int err;
1080 int notify = 0;
1081 struct net_device *dev;
1082 int update_isrouter = 0;
1083
1084 write_lock_bh(&neigh->lock);
1085
1086 dev = neigh->dev;
1087 old = neigh->nud_state;
1088 err = -EPERM;
1089
1090 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1091 (old & (NUD_NOARP | NUD_PERMANENT)))
1092 goto out;
1093 if (neigh->dead)
1094 goto out;
1095
1096 if (!(new & NUD_VALID)) {
1097 neigh_del_timer(neigh);
1098 if (old & NUD_CONNECTED)
1099 neigh_suspect(neigh);
1100 neigh->nud_state = new;
1101 err = 0;
1102 notify = old & NUD_VALID;
1103 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1104 (new & NUD_FAILED)) {
1105 neigh_invalidate(neigh);
1106 notify = 1;
1107 }
1108 goto out;
1109 }
1110
1111 /* Compare new lladdr with cached one */
1112 if (!dev->addr_len) {
1113 /* First case: device needs no address. */
1114 lladdr = neigh->ha;
1115 } else if (lladdr) {
1116 /* The second case: if something is already cached
1117 and a new address is proposed:
1118 - compare new & old
1119 - if they are different, check override flag
1120 */
1121 if ((old & NUD_VALID) &&
1122 !memcmp(lladdr, neigh->ha, dev->addr_len))
1123 lladdr = neigh->ha;
1124 } else {
1125 /* No address is supplied; if we know something,
1126 use it, otherwise discard the request.
1127 */
1128 err = -EINVAL;
1129 if (!(old & NUD_VALID))
1130 goto out;
1131 lladdr = neigh->ha;
1132 }
1133
1134 if (new & NUD_CONNECTED)
1135 neigh->confirmed = jiffies;
1136 neigh->updated = jiffies;
1137
1138 /* If entry was valid and address is not changed,
1139 do not change entry state, if new one is STALE.
1140 */
1141 err = 0;
1142 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1143 if (old & NUD_VALID) {
1144 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1145 update_isrouter = 0;
1146 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1147 (old & NUD_CONNECTED)) {
1148 lladdr = neigh->ha;
1149 new = NUD_STALE;
1150 } else
1151 goto out;
1152 } else {
1153 if (lladdr == neigh->ha && new == NUD_STALE &&
1154 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1155 (old & NUD_CONNECTED))
1156 )
1157 new = old;
1158 }
1159 }
1160
1161 if (new != old) {
1162 neigh_del_timer(neigh);
1163 if (new & NUD_PROBE)
1164 atomic_set(&neigh->probes, 0);
1165 if (new & NUD_IN_TIMER)
1166 neigh_add_timer(neigh, (jiffies +
1167 ((new & NUD_REACHABLE) ?
1168 neigh->parms->reachable_time :
1169 0)));
1170 neigh->nud_state = new;
1171 notify = 1;
1172 }
1173
1174 if (lladdr != neigh->ha) {
1175 write_seqlock(&neigh->ha_lock);
1176 memcpy(&neigh->ha, lladdr, dev->addr_len);
1177 write_sequnlock(&neigh->ha_lock);
1178 neigh_update_hhs(neigh);
1179 if (!(new & NUD_CONNECTED))
1180 neigh->confirmed = jiffies -
1181 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1182 notify = 1;
1183 }
1184 if (new == old)
1185 goto out;
1186 if (new & NUD_CONNECTED)
1187 neigh_connect(neigh);
1188 else
1189 neigh_suspect(neigh);
1190 if (!(old & NUD_VALID)) {
1191 struct sk_buff *skb;
1192
1193 /* Again: avoid dead loop if something went wrong */
1194
1195 while (neigh->nud_state & NUD_VALID &&
1196 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197 struct dst_entry *dst = skb_dst(skb);
1198 struct neighbour *n2, *n1 = neigh;
1199 write_unlock_bh(&neigh->lock);
1200
1201 rcu_read_lock();
1202
1203 /* Why not just use 'neigh' as-is? The problem is that
1204 * things such as shaper, eql, and sch_teql can end up
1205 * using alternative, different, neigh objects to output
1206 * the packet in the output path. So what we need to do
1207 * here is re-lookup the top-level neigh in the path so
1208 * we can reinject the packet there.
1209 */
1210 n2 = NULL;
1211 if (dst) {
1212 n2 = dst_neigh_lookup_skb(dst, skb);
1213 if (n2)
1214 n1 = n2;
1215 }
1216 n1->output(n1, skb);
1217 if (n2)
1218 neigh_release(n2);
1219 rcu_read_unlock();
1220
1221 write_lock_bh(&neigh->lock);
1222 }
1223 __skb_queue_purge(&neigh->arp_queue);
1224 neigh->arp_queue_len_bytes = 0;
1225 }
1226out:
1227 if (update_isrouter) {
1228 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1229 (neigh->flags | NTF_ROUTER) :
1230 (neigh->flags & ~NTF_ROUTER);
1231 }
1232 write_unlock_bh(&neigh->lock);
1233
1234 if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
1236
1237 return err;
1238}
1239EXPORT_SYMBOL(neigh_update);
1240
1241/* Update the neigh to listen temporarily for probe responses, even if it is
1242 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1243 */
1244void __neigh_set_probe_once(struct neighbour *neigh)
1245{
1246 if (neigh->dead)
1247 return;
1248 neigh->updated = jiffies;
1249 if (!(neigh->nud_state & NUD_FAILED))
1250 return;
1251 neigh->nud_state = NUD_INCOMPLETE;
1252 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253 neigh_add_timer(neigh,
1254 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255}
1256EXPORT_SYMBOL(__neigh_set_probe_once);
1257
1258struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1259 u8 *lladdr, void *saddr,
1260 struct net_device *dev)
1261{
1262 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1263 lladdr || !dev->addr_len);
1264 if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
1267 return neigh;
1268}
1269EXPORT_SYMBOL(neigh_event_ns);
1270
/* Takes n->lock itself to serialize concurrent initializers. */
1272static void neigh_hh_init(struct neighbour *n)
1273{
1274 struct net_device *dev = n->dev;
1275 __be16 prot = n->tbl->protocol;
1276 struct hh_cache *hh = &n->hh;
1277
1278 write_lock_bh(&n->lock);
1279
1280 /* Only one thread can come in here and initialize the
1281 * hh_cache entry.
1282 */
1283 if (!hh->hh_len)
1284 dev->header_ops->cache(n, hh, prot);
1285
1286 write_unlock_bh(&n->lock);
1287}
1288
1289/* Slow and careful. */
1290
1291int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1292{
1293 int rc = 0;
1294
1295 if (!neigh_event_send(neigh, skb)) {
1296 int err;
1297 struct net_device *dev = neigh->dev;
1298 unsigned int seq;
1299
1300 if (dev->header_ops->cache && !neigh->hh.hh_len)
1301 neigh_hh_init(neigh);
1302
1303 do {
1304 __skb_pull(skb, skb_network_offset(skb));
1305 seq = read_seqbegin(&neigh->ha_lock);
1306 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1307 neigh->ha, NULL, skb->len);
1308 } while (read_seqretry(&neigh->ha_lock, seq));
1309
1310 if (err >= 0)
1311 rc = dev_queue_xmit(skb);
1312 else
1313 goto out_kfree_skb;
1314 }
1315out:
1316 return rc;
1317out_kfree_skb:
1318 rc = -EINVAL;
1319 kfree_skb(skb);
1320 goto out;
1321}
1322EXPORT_SYMBOL(neigh_resolve_output);
1323
1324/* As fast as possible without hh cache */
1325
1326int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1327{
1328 struct net_device *dev = neigh->dev;
1329 unsigned int seq;
1330 int err;
1331
1332 do {
1333 __skb_pull(skb, skb_network_offset(skb));
1334 seq = read_seqbegin(&neigh->ha_lock);
1335 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1336 neigh->ha, NULL, skb->len);
1337 } while (read_seqretry(&neigh->ha_lock, seq));
1338
1339 if (err >= 0)
1340 err = dev_queue_xmit(skb);
1341 else {
1342 err = -EINVAL;
1343 kfree_skb(skb);
1344 }
1345 return err;
1346}
1347EXPORT_SYMBOL(neigh_connected_output);
1348
1349int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1350{
1351 return dev_queue_xmit(skb);
1352}
1353EXPORT_SYMBOL(neigh_direct_output);
1354
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1358 long sched_next = 0;
1359 unsigned long now = jiffies;
1360 struct sk_buff *skb, *n;
1361
1362 spin_lock(&tbl->proxy_queue.lock);
1363
1364 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1365 long tdif = NEIGH_CB(skb)->sched_next - now;
1366
1367 if (tdif <= 0) {
1368 struct net_device *dev = skb->dev;
1369
1370 __skb_unlink(skb, &tbl->proxy_queue);
1371 if (tbl->proxy_redo && netif_running(dev)) {
1372 rcu_read_lock();
1373 tbl->proxy_redo(skb);
1374 rcu_read_unlock();
1375 } else {
1376 kfree_skb(skb);
1377 }
1378
1379 dev_put(dev);
1380 } else if (!sched_next || tdif < sched_next)
1381 sched_next = tdif;
1382 }
1383 del_timer(&tbl->proxy_timer);
1384 if (sched_next)
1385 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1386 spin_unlock(&tbl->proxy_queue.lock);
1387}
1388
1389void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1390 struct sk_buff *skb)
1391{
1392 unsigned long now = jiffies;
1393
1394 unsigned long sched_next = now + (prandom_u32() %
1395 NEIGH_VAR(p, PROXY_DELAY));
1396
1397 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1398 kfree_skb(skb);
1399 return;
1400 }
1401
1402 NEIGH_CB(skb)->sched_next = sched_next;
1403 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1404
1405 spin_lock(&tbl->proxy_queue.lock);
1406 if (del_timer(&tbl->proxy_timer)) {
1407 if (time_before(tbl->proxy_timer.expires, sched_next))
1408 sched_next = tbl->proxy_timer.expires;
1409 }
1410 skb_dst_drop(skb);
1411 dev_hold(skb->dev);
1412 __skb_queue_tail(&tbl->proxy_queue, skb);
1413 mod_timer(&tbl->proxy_timer, sched_next);
1414 spin_unlock(&tbl->proxy_queue.lock);
1415}
1416EXPORT_SYMBOL(pneigh_enqueue);
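
/* Example: assuming IPv4's default PROXY_DELAY of (8 * HZ) / 10, a
 * proxied ARP request is answered after a uniform random delay in
 * [0, 0.8s), so a proxy does not race the target host's own reply.
 */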
1417
1418static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1419 struct net *net, int ifindex)
1420{
1421 struct neigh_parms *p;
1422
1423 list_for_each_entry(p, &tbl->parms_list, list) {
1424 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1425 (!p->dev && !ifindex && net_eq(net, &init_net)))
1426 return p;
1427 }
1428
1429 return NULL;
1430}
1431
1432struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1433 struct neigh_table *tbl)
1434{
1435 struct neigh_parms *p;
1436 struct net *net = dev_net(dev);
1437 const struct net_device_ops *ops = dev->netdev_ops;
1438
1439 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1440 if (p) {
1441 p->tbl = tbl;
1442 atomic_set(&p->refcnt, 1);
1443 p->reachable_time =
1444 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1445 dev_hold(dev);
1446 p->dev = dev;
1447 write_pnet(&p->net, net);
1448 p->sysctl_table = NULL;
1449
1450 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1451 dev_put(dev);
1452 kfree(p);
1453 return NULL;
1454 }
1455
1456 write_lock_bh(&tbl->lock);
1457 list_add(&p->list, &tbl->parms.list);
1458 write_unlock_bh(&tbl->lock);
1459
1460 neigh_parms_data_state_cleanall(p);
1461 }
1462 return p;
1463}
1464EXPORT_SYMBOL(neigh_parms_alloc);
1465
1466static void neigh_rcu_free_parms(struct rcu_head *head)
1467{
1468 struct neigh_parms *parms =
1469 container_of(head, struct neigh_parms, rcu_head);
1470
1471 neigh_parms_put(parms);
1472}
1473
1474void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1475{
1476 if (!parms || parms == &tbl->parms)
1477 return;
1478 write_lock_bh(&tbl->lock);
1479 list_del(&parms->list);
1480 parms->dead = 1;
1481 write_unlock_bh(&tbl->lock);
1482 if (parms->dev)
1483 dev_put(parms->dev);
1484 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1485}
1486EXPORT_SYMBOL(neigh_parms_release);
1487
1488static void neigh_parms_destroy(struct neigh_parms *parms)
1489{
1490 kfree(parms);
1491}
1492
1493static struct lock_class_key neigh_table_proxy_queue_class;
1494
1495static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1496
1497void neigh_table_init(int index, struct neigh_table *tbl)
1498{
1499 unsigned long now = jiffies;
1500 unsigned long phsize;
1501
1502 INIT_LIST_HEAD(&tbl->parms_list);
1503 list_add(&tbl->parms.list, &tbl->parms_list);
1504 write_pnet(&tbl->parms.net, &init_net);
1505 atomic_set(&tbl->parms.refcnt, 1);
1506 tbl->parms.reachable_time =
1507 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1508
1509 tbl->stats = alloc_percpu(struct neigh_statistics);
1510 if (!tbl->stats)
1511 panic("cannot create neighbour cache statistics");
1512
1513#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
1516 panic("cannot create neighbour proc dir entry");
1517#endif
1518
1519 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1520
1521 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1522 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1523
1524 if (!tbl->nht || !tbl->phash_buckets)
1525 panic("cannot allocate neighbour cache hashes");
1526
1527 if (!tbl->entry_size)
1528 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1529 tbl->key_len, NEIGH_PRIV_ALIGN);
1530 else
1531 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1532
1533 rwlock_init(&tbl->lock);
1534 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1535 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1536 tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1538 skb_queue_head_init_class(&tbl->proxy_queue,
1539 &neigh_table_proxy_queue_class);
1540
1541 tbl->last_flush = now;
1542 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1543
1544 neigh_tables[index] = tbl;
1545}
1546EXPORT_SYMBOL(neigh_table_init);
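
/* Each protocol registers its table once at boot; IPv4's arp_init(),
 * for instance, does
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * where arp_tbl statically supplies the family, key_len, hash and
 * constructor ops, and default parms.
 */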
1547
1548int neigh_table_clear(int index, struct neigh_table *tbl)
1549{
1550 neigh_tables[index] = NULL;
1551 /* It is not clean... Fix it to unload IPv6 module safely */
1552 cancel_delayed_work_sync(&tbl->gc_work);
1553 del_timer_sync(&tbl->proxy_timer);
1554 pneigh_queue_purge(&tbl->proxy_queue);
1555 neigh_ifdown(tbl, NULL);
1556 if (atomic_read(&tbl->entries))
1557 pr_crit("neighbour leakage\n");
1558
1559 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1560 neigh_hash_free_rcu);
1561 tbl->nht = NULL;
1562
1563 kfree(tbl->phash_buckets);
1564 tbl->phash_buckets = NULL;
1565
1566 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1567
1568 free_percpu(tbl->stats);
1569 tbl->stats = NULL;
1570
1571 return 0;
1572}
1573EXPORT_SYMBOL(neigh_table_clear);
1574
1575static struct neigh_table *neigh_find_table(int family)
1576{
1577 struct neigh_table *tbl = NULL;
1578
1579 switch (family) {
1580 case AF_INET:
1581 tbl = neigh_tables[NEIGH_ARP_TABLE];
1582 break;
1583 case AF_INET6:
1584 tbl = neigh_tables[NEIGH_ND_TABLE];
1585 break;
1586 case AF_DECnet:
1587 tbl = neigh_tables[NEIGH_DN_TABLE];
1588 break;
1589 }
1590
1591 return tbl;
1592}
1593
1594static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1595{
1596 struct net *net = sock_net(skb->sk);
1597 struct ndmsg *ndm;
1598 struct nlattr *dst_attr;
1599 struct neigh_table *tbl;
1600 struct neighbour *neigh;
1601 struct net_device *dev = NULL;
1602 int err = -EINVAL;
1603
1604 ASSERT_RTNL();
1605 if (nlmsg_len(nlh) < sizeof(*ndm))
1606 goto out;
1607
1608 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1609 if (dst_attr == NULL)
1610 goto out;
1611
1612 ndm = nlmsg_data(nlh);
1613 if (ndm->ndm_ifindex) {
1614 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1615 if (dev == NULL) {
1616 err = -ENODEV;
1617 goto out;
1618 }
1619 }
1620
1621 tbl = neigh_find_table(ndm->ndm_family);
1622 if (tbl == NULL)
1623 return -EAFNOSUPPORT;
1624
1625 if (nla_len(dst_attr) < tbl->key_len)
1626 goto out;
1627
1628 if (ndm->ndm_flags & NTF_PROXY) {
1629 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1630 goto out;
1631 }
1632
1633 if (dev == NULL)
1634 goto out;
1635
1636 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1637 if (neigh == NULL) {
1638 err = -ENOENT;
1639 goto out;
1640 }
1641
	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			   NETLINK_CB(skb).portid);
1645 neigh_release(neigh);
1646
1647out:
1648 return err;
1649}
1650
1651static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1652{
1653 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1654 struct net *net = sock_net(skb->sk);
1655 struct ndmsg *ndm;
1656 struct nlattr *tb[NDA_MAX+1];
1657 struct neigh_table *tbl;
1658 struct net_device *dev = NULL;
1659 struct neighbour *neigh;
1660 void *dst, *lladdr;
1661 int err;
1662
1663 ASSERT_RTNL();
1664 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1665 if (err < 0)
1666 goto out;
1667
1668 err = -EINVAL;
1669 if (tb[NDA_DST] == NULL)
1670 goto out;
1671
1672 ndm = nlmsg_data(nlh);
1673 if (ndm->ndm_ifindex) {
1674 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1675 if (dev == NULL) {
1676 err = -ENODEV;
1677 goto out;
1678 }
1679
1680 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1681 goto out;
1682 }
1683
1684 tbl = neigh_find_table(ndm->ndm_family);
1685 if (tbl == NULL)
1686 return -EAFNOSUPPORT;
1687
1688 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1689 goto out;
1690 dst = nla_data(tb[NDA_DST]);
1691 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1692
1693 if (ndm->ndm_flags & NTF_PROXY) {
1694 struct pneigh_entry *pn;
1695
1696 err = -ENOBUFS;
1697 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1698 if (pn) {
1699 pn->flags = ndm->ndm_flags;
1700 err = 0;
1701 }
1702 goto out;
1703 }
1704
1705 if (dev == NULL)
1706 goto out;
1707
1708 neigh = neigh_lookup(tbl, dst, dev);
1709 if (neigh == NULL) {
1710 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1711 err = -ENOENT;
1712 goto out;
1713 }
1714
1715 neigh = __neigh_lookup_errno(tbl, dst, dev);
1716 if (IS_ERR(neigh)) {
1717 err = PTR_ERR(neigh);
1718 goto out;
1719 }
1720 } else {
1721 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1722 err = -EEXIST;
1723 neigh_release(neigh);
1724 goto out;
1725 }
1726
1727 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1728 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1729 }
1730
1731 if (ndm->ndm_flags & NTF_USE) {
1732 neigh_event_send(neigh, NULL);
1733 err = 0;
1734 } else
		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				   NETLINK_CB(skb).portid);
1736 neigh_release(neigh);
1737
1738out:
1739 return err;
1740}
1741
1742static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1743{
1744 struct nlattr *nest;
1745
1746 nest = nla_nest_start(skb, NDTA_PARMS);
1747 if (nest == NULL)
1748 return -ENOBUFS;
1749
1750 if ((parms->dev &&
1751 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1752 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1753 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1754 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1756 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1757 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1758 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1759 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1760 nla_put_u32(skb, NDTPA_UCAST_PROBES,
1761 NEIGH_VAR(parms, UCAST_PROBES)) ||
1762 nla_put_u32(skb, NDTPA_MCAST_PROBES,
1763 NEIGH_VAR(parms, MCAST_PROBES)) ||
1764 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1765 NEIGH_VAR(parms, MCAST_REPROBES)) ||
1766 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1767 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1768 NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1769 nla_put_msecs(skb, NDTPA_GC_STALETIME,
1770 NEIGH_VAR(parms, GC_STALETIME)) ||
1771 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1772 NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1773 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1774 NEIGH_VAR(parms, RETRANS_TIME)) ||
1775 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1776 NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1777 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1778 NEIGH_VAR(parms, PROXY_DELAY)) ||
1779 nla_put_msecs(skb, NDTPA_LOCKTIME,
1780 NEIGH_VAR(parms, LOCKTIME)))
1781 goto nla_put_failure;
1782 return nla_nest_end(skb, nest);
1783
1784nla_put_failure:
1785 nla_nest_cancel(skb, nest);
1786 return -EMSGSIZE;
1787}
1788
1789static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1790 u32 pid, u32 seq, int type, int flags)
1791{
1792 struct nlmsghdr *nlh;
1793 struct ndtmsg *ndtmsg;
1794
1795 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1796 if (nlh == NULL)
1797 return -EMSGSIZE;
1798
1799 ndtmsg = nlmsg_data(nlh);
1800
1801 read_lock_bh(&tbl->lock);
1802 ndtmsg->ndtm_family = tbl->family;
1803 ndtmsg->ndtm_pad1 = 0;
1804 ndtmsg->ndtm_pad2 = 0;
1805
1806 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1807 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1808 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1809 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1810 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1811 goto nla_put_failure;
1812 {
1813 unsigned long now = jiffies;
1814 unsigned int flush_delta = now - tbl->last_flush;
1815 unsigned int rand_delta = now - tbl->last_rand;
1816 struct neigh_hash_table *nht;
1817 struct ndt_config ndc = {
1818 .ndtc_key_len = tbl->key_len,
1819 .ndtc_entry_size = tbl->entry_size,
1820 .ndtc_entries = atomic_read(&tbl->entries),
1821 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1822 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1823 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1824 };
1825
1826 rcu_read_lock_bh();
1827 nht = rcu_dereference_bh(tbl->nht);
1828 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1829 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1830 rcu_read_unlock_bh();
1831
1832 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1833 goto nla_put_failure;
1834 }
1835
1836 {
1837 int cpu;
1838 struct ndt_stats ndst;
1839
1840 memset(&ndst, 0, sizeof(ndst));
1841
1842 for_each_possible_cpu(cpu) {
1843 struct neigh_statistics *st;
1844
1845 st = per_cpu_ptr(tbl->stats, cpu);
1846 ndst.ndts_allocs += st->allocs;
1847 ndst.ndts_destroys += st->destroys;
1848 ndst.ndts_hash_grows += st->hash_grows;
1849 ndst.ndts_res_failed += st->res_failed;
1850 ndst.ndts_lookups += st->lookups;
1851 ndst.ndts_hits += st->hits;
1852 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1853 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1854 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1855 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1856 ndst.ndts_table_fulls += st->table_fulls;
1857 }
1858
1859 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1860 goto nla_put_failure;
1861 }
1862
1863 BUG_ON(tbl->parms.dev);
1864 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1865 goto nla_put_failure;
1866
1867 read_unlock_bh(&tbl->lock);
1868 nlmsg_end(skb, nlh);
1869 return 0;
1870
1871nla_put_failure:
1872 read_unlock_bh(&tbl->lock);
1873 nlmsg_cancel(skb, nlh);
1874 return -EMSGSIZE;
1875}
1876
1877static int neightbl_fill_param_info(struct sk_buff *skb,
1878 struct neigh_table *tbl,
1879 struct neigh_parms *parms,
1880 u32 pid, u32 seq, int type,
1881 unsigned int flags)
1882{
1883 struct ndtmsg *ndtmsg;
1884 struct nlmsghdr *nlh;
1885
1886 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1887 if (nlh == NULL)
1888 return -EMSGSIZE;
1889
1890 ndtmsg = nlmsg_data(nlh);
1891
1892 read_lock_bh(&tbl->lock);
1893 ndtmsg->ndtm_family = tbl->family;
1894 ndtmsg->ndtm_pad1 = 0;
1895 ndtmsg->ndtm_pad2 = 0;
1896
1897 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1898 neightbl_fill_parms(skb, parms) < 0)
1899 goto errout;
1900
1901 read_unlock_bh(&tbl->lock);
1902 nlmsg_end(skb, nlh);
1903 return 0;
1904errout:
1905 read_unlock_bh(&tbl->lock);
1906 nlmsg_cancel(skb, nlh);
1907 return -EMSGSIZE;
1908}
1909
1910static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1911 [NDTA_NAME] = { .type = NLA_STRING },
1912 [NDTA_THRESH1] = { .type = NLA_U32 },
1913 [NDTA_THRESH2] = { .type = NLA_U32 },
1914 [NDTA_THRESH3] = { .type = NLA_U32 },
1915 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1916 [NDTA_PARMS] = { .type = NLA_NESTED },
1917};
1918
1919static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1920 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1921 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1922 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1923 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1924 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1925 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1926 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
1927 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1928 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1929 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1930 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1931 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1932 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1933 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1934};
1935
1936static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1937{
1938 struct net *net = sock_net(skb->sk);
1939 struct neigh_table *tbl;
1940 struct ndtmsg *ndtmsg;
1941 struct nlattr *tb[NDTA_MAX+1];
1942 bool found = false;
1943 int err, tidx;
1944
1945 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1946 nl_neightbl_policy);
1947 if (err < 0)
1948 goto errout;
1949
1950 if (tb[NDTA_NAME] == NULL) {
1951 err = -EINVAL;
1952 goto errout;
1953 }
1954
1955 ndtmsg = nlmsg_data(nlh);
1956
1957 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1958 tbl = neigh_tables[tidx];
1959 if (!tbl)
1960 continue;
1961 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1962 continue;
1963 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1964 found = true;
1965 break;
1966 }
1967 }
1968
1969 if (!found)
1970 return -ENOENT;
1971
1972 /*
1973 * We acquire tbl->lock to be nice to the periodic timers and
1974 * make sure they always see a consistent set of values.
1975 */
1976 write_lock_bh(&tbl->lock);
1977
1978 if (tb[NDTA_PARMS]) {
1979 struct nlattr *tbp[NDTPA_MAX+1];
1980 struct neigh_parms *p;
1981 int i, ifindex = 0;
1982
1983 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1984 nl_ntbl_parm_policy);
1985 if (err < 0)
1986 goto errout_tbl_lock;
1987
1988 if (tbp[NDTPA_IFINDEX])
1989 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1990
1991 p = lookup_neigh_parms(tbl, net, ifindex);
1992 if (p == NULL) {
1993 err = -ENOENT;
1994 goto errout_tbl_lock;
1995 }
1996
1997 for (i = 1; i <= NDTPA_MAX; i++) {
1998 if (tbp[i] == NULL)
1999 continue;
2000
2001 switch (i) {
2002 case NDTPA_QUEUE_LEN:
2003 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2004 nla_get_u32(tbp[i]) *
2005 SKB_TRUESIZE(ETH_FRAME_LEN));
2006 break;
2007 case NDTPA_QUEUE_LENBYTES:
2008 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2009 nla_get_u32(tbp[i]));
2010 break;
2011 case NDTPA_PROXY_QLEN:
2012 NEIGH_VAR_SET(p, PROXY_QLEN,
2013 nla_get_u32(tbp[i]));
2014 break;
2015 case NDTPA_APP_PROBES:
2016 NEIGH_VAR_SET(p, APP_PROBES,
2017 nla_get_u32(tbp[i]));
2018 break;
2019 case NDTPA_UCAST_PROBES:
2020 NEIGH_VAR_SET(p, UCAST_PROBES,
2021 nla_get_u32(tbp[i]));
2022 break;
2023 case NDTPA_MCAST_PROBES:
2024 NEIGH_VAR_SET(p, MCAST_PROBES,
2025 nla_get_u32(tbp[i]));
2026 break;
2027 case NDTPA_MCAST_REPROBES:
2028 NEIGH_VAR_SET(p, MCAST_REPROBES,
2029 nla_get_u32(tbp[i]));
2030 break;
2031 case NDTPA_BASE_REACHABLE_TIME:
2032 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2033 nla_get_msecs(tbp[i]));
2034 /* update reachable_time as well, otherwise, the change will
2035 * only be effective after the next time neigh_periodic_work
2036 * decides to recompute it (can be multiple minutes)
2037 */
2038 p->reachable_time =
2039 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2040 break;
2041 case NDTPA_GC_STALETIME:
2042 NEIGH_VAR_SET(p, GC_STALETIME,
2043 nla_get_msecs(tbp[i]));
2044 break;
2045 case NDTPA_DELAY_PROBE_TIME:
2046 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2047 nla_get_msecs(tbp[i]));
2048 break;
2049 case NDTPA_RETRANS_TIME:
2050 NEIGH_VAR_SET(p, RETRANS_TIME,
2051 nla_get_msecs(tbp[i]));
2052 break;
2053 case NDTPA_ANYCAST_DELAY:
2054 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055 nla_get_msecs(tbp[i]));
2056 break;
2057 case NDTPA_PROXY_DELAY:
2058 NEIGH_VAR_SET(p, PROXY_DELAY,
2059 nla_get_msecs(tbp[i]));
2060 break;
2061 case NDTPA_LOCKTIME:
2062 NEIGH_VAR_SET(p, LOCKTIME,
2063 nla_get_msecs(tbp[i]));
2064 break;
2065 }
2066 }
2067 }
2068
2069 err = -ENOENT;
2070 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072 !net_eq(net, &init_net))
2073 goto errout_tbl_lock;
2074
2075 if (tb[NDTA_THRESH1])
2076 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078 if (tb[NDTA_THRESH2])
2079 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081 if (tb[NDTA_THRESH3])
2082 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084 if (tb[NDTA_GC_INTERVAL])
2085 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087 err = 0;
2088
2089errout_tbl_lock:
2090 write_unlock_bh(&tbl->lock);
2091errout:
2092 return err;
2093}
2094
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097 struct net *net = sock_net(skb->sk);
2098 int family, tidx, nidx = 0;
2099 int tbl_skip = cb->args[0];
2100 int neigh_skip = cb->args[1];
2101 struct neigh_table *tbl;
2102
2103 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106 struct neigh_parms *p;
2107
2108 tbl = neigh_tables[tidx];
2109 if (!tbl)
2110 continue;
2111
2112 if (tidx < tbl_skip || (family && tbl->family != family))
2113 continue;
2114
2115 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117 NLM_F_MULTI) < 0)
2118 break;
2119
2120 nidx = 0;
2121 p = list_next_entry(&tbl->parms, list);
2122 list_for_each_entry_from(p, &tbl->parms_list, list) {
2123 if (!net_eq(neigh_parms_net(p), net))
2124 continue;
2125
2126 if (nidx < neigh_skip)
2127 goto next;
2128
2129 if (neightbl_fill_param_info(skb, tbl, p,
2130 NETLINK_CB(cb->skb).portid,
2131 cb->nlh->nlmsg_seq,
2132 RTM_NEWNEIGHTBL,
2133 NLM_F_MULTI) < 0)
2134 goto out;
2135 next:
2136 nidx++;
2137 }
2138
2139 neigh_skip = 0;
2140 }
2141out:
2142 cb->args[0] = tidx;
2143 cb->args[1] = nidx;
2144
2145 return skb->len;
2146}
2147
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149 u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151 unsigned long now = jiffies;
2152 struct nda_cacheinfo ci;
2153 struct nlmsghdr *nlh;
2154 struct ndmsg *ndm;
2155
2156 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157 if (nlh == NULL)
2158 return -EMSGSIZE;
2159
2160 ndm = nlmsg_data(nlh);
2161 ndm->ndm_family = neigh->ops->family;
2162 ndm->ndm_pad1 = 0;
2163 ndm->ndm_pad2 = 0;
2164 ndm->ndm_flags = neigh->flags;
2165 ndm->ndm_type = neigh->type;
2166 ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169 goto nla_put_failure;
2170
2171 read_lock_bh(&neigh->lock);
2172 ndm->ndm_state = neigh->nud_state;
2173 if (neigh->nud_state & NUD_VALID) {
2174 char haddr[MAX_ADDR_LEN];
2175
2176 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178 read_unlock_bh(&neigh->lock);
2179 goto nla_put_failure;
2180 }
2181 }
2182
2183 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2184 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2186 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2187 read_unlock_bh(&neigh->lock);
2188
2189 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191 goto nla_put_failure;
2192
2193 nlmsg_end(skb, nlh);
2194 return 0;
2195
2196nla_put_failure:
2197 nlmsg_cancel(skb, nlh);
2198 return -EMSGSIZE;
2199}
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202 u32 pid, u32 seq, int type, unsigned int flags,
2203 struct neigh_table *tbl)
2204{
2205 struct nlmsghdr *nlh;
2206 struct ndmsg *ndm;
2207
2208 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209 if (nlh == NULL)
2210 return -EMSGSIZE;
2211
2212 ndm = nlmsg_data(nlh);
2213 ndm->ndm_family = tbl->family;
2214 ndm->ndm_pad1 = 0;
2215 ndm->ndm_pad2 = 0;
2216 ndm->ndm_flags = pn->flags | NTF_PROXY;
2217 ndm->ndm_type = RTN_UNICAST;
2218 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219 ndm->ndm_state = NUD_NONE;
2220
2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222 goto nla_put_failure;
2223
2224 nlmsg_end(skb, nlh);
2225 return 0;
2226
2227nla_put_failure:
2228 nlmsg_cancel(skb, nlh);
2229 return -EMSGSIZE;
2230}
2231
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
2237
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240 struct net_device *master;
2241
2242 if (!master_idx)
2243 return false;
2244
2245 master = netdev_master_upper_dev_get(dev);
2246 if (!master || master->ifindex != master_idx)
2247 return true;
2248
2249 return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254 if (filter_idx && dev->ifindex != filter_idx)
2255 return true;
2256
2257 return false;
2258}
2259
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261 struct netlink_callback *cb)
2262{
2263 struct net *net = sock_net(skb->sk);
2264 const struct nlmsghdr *nlh = cb->nlh;
2265 struct nlattr *tb[NDA_MAX + 1];
2266 struct neighbour *n;
2267 int rc, h, s_h = cb->args[1];
2268 int idx, s_idx = idx = cb->args[2];
2269 struct neigh_hash_table *nht;
2270 int filter_master_idx = 0, filter_idx = 0;
2271 unsigned int flags = NLM_F_MULTI;
2272 int err;
2273
2274 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275 if (!err) {
2276 if (tb[NDA_IFINDEX])
2277 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279 if (tb[NDA_MASTER])
2280 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282 if (filter_idx || filter_master_idx)
2283 flags |= NLM_F_DUMP_FILTERED;
2284 }
2285
2286 rcu_read_lock_bh();
2287 nht = rcu_dereference_bh(tbl->nht);
2288
2289 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290 if (h > s_h)
2291 s_idx = 0;
2292 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293 n != NULL;
2294 n = rcu_dereference_bh(n->next)) {
2295 if (!net_eq(dev_net(n->dev), net))
2296 continue;
2297 if (neigh_ifindex_filtered(n->dev, filter_idx))
2298 continue;
2299 if (neigh_master_filtered(n->dev, filter_master_idx))
2300 continue;
2301 if (idx < s_idx)
2302 goto next;
2303 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2304 cb->nlh->nlmsg_seq,
2305 RTM_NEWNEIGH,
2306 flags) < 0) {
2307 rc = -1;
2308 goto out;
2309 }
2310next:
2311 idx++;
2312 }
2313 }
2314 rc = skb->len;
2315out:
2316 rcu_read_unlock_bh();
2317 cb->args[1] = h;
2318 cb->args[2] = idx;
2319 return rc;
2320}
2321
2322static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2323 struct netlink_callback *cb)
2324{
2325 struct pneigh_entry *n;
2326 struct net *net = sock_net(skb->sk);
2327 int rc, h, s_h = cb->args[3];
2328 int idx, s_idx = idx = cb->args[4];
2329
2330 read_lock_bh(&tbl->lock);
2331
2332 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2333 if (h > s_h)
2334 s_idx = 0;
2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336 if (pneigh_net(n) != net)
2337 continue;
2338 if (idx < s_idx)
2339 goto next;
2340 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2341 cb->nlh->nlmsg_seq,
2342 RTM_NEWNEIGH,
2343 NLM_F_MULTI, tbl) < 0) {
2344 read_unlock_bh(&tbl->lock);
2345 rc = -1;
2346 goto out;
2347 }
2348 next:
2349 idx++;
2350 }
2351 }
2352
2353 read_unlock_bh(&tbl->lock);
2354 rc = skb->len;
2355out:
2356 cb->args[3] = h;
2357 cb->args[4] = idx;
2358 return rc;
2359
2360}
2361
2362static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2363{
2364 struct neigh_table *tbl;
2365 int t, family, s_t;
2366 int proxy = 0;
2367 int err;
2368
2369 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2370
2371 /* check for full ndmsg structure presence, family member is
2372 * the same for both structures
2373 */
2374 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2375 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2376 proxy = 1;
2377
2378 s_t = cb->args[0];
2379
2380 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2381 tbl = neigh_tables[t];
2382
2383 if (!tbl)
2384 continue;
2385 if (t < s_t || (family && tbl->family != family))
2386 continue;
2387 if (t > s_t)
2388 memset(&cb->args[1], 0, sizeof(cb->args) -
2389 sizeof(cb->args[0]));
2390 if (proxy)
2391 err = pneigh_dump_table(tbl, skb, cb);
2392 else
2393 err = neigh_dump_table(tbl, skb, cb);
2394 if (err < 0)
2395 break;
2396 }
2397
2398 cb->args[0] = t;
2399 return skb->len;
2400}
2401
2402void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2403{
2404 int chain;
2405 struct neigh_hash_table *nht;
2406
2407 rcu_read_lock_bh();
2408 nht = rcu_dereference_bh(tbl->nht);
2409
2410 read_lock(&tbl->lock); /* avoid resizes */
2411 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2412 struct neighbour *n;
2413
2414 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2415 n != NULL;
2416 n = rcu_dereference_bh(n->next))
2417 cb(n, cookie);
2418 }
2419 read_unlock(&tbl->lock);
2420 rcu_read_unlock_bh();
2421}
2422EXPORT_SYMBOL(neigh_for_each);
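
/* Usage sketch: the callback runs under tbl->lock (read) and RCU-BH,
 * so it must not sleep. A hypothetical dump of every entry's state:
 *
 *	static void dump_one(struct neighbour *n, void *cookie)
 *	{
 *		pr_info("%pI4 state %#x\n", n->primary_key, n->nud_state);
 *	}
 *
 *	neigh_for_each(&arp_tbl, dump_one, NULL);
 */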
2423
2424/* The tbl->lock must be held as a writer and BH disabled. */
2425void __neigh_for_each_release(struct neigh_table *tbl,
2426 int (*cb)(struct neighbour *))
2427{
2428 int chain;
2429 struct neigh_hash_table *nht;
2430
2431 nht = rcu_dereference_protected(tbl->nht,
2432 lockdep_is_held(&tbl->lock));
2433 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2434 struct neighbour *n;
2435 struct neighbour __rcu **np;
2436
2437 np = &nht->hash_buckets[chain];
2438 while ((n = rcu_dereference_protected(*np,
2439 lockdep_is_held(&tbl->lock))) != NULL) {
2440 int release;
2441
2442 write_lock(&n->lock);
2443 release = cb(n);
2444 if (release) {
2445 rcu_assign_pointer(*np,
2446 rcu_dereference_protected(n->next,
2447 lockdep_is_held(&tbl->lock)));
2448 n->dead = 1;
2449 } else
2450 np = &n->next;
2451 write_unlock(&n->lock);
2452 if (release)
2453 neigh_cleanup_and_release(n);
2454 }
2455 }
2456}
2457EXPORT_SYMBOL(__neigh_for_each_release);
2458
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;

	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		/* __neigh_lookup_noref() walks the hash table under RCU-bh,
		 * so the lookup and the output call must sit inside an
		 * RCU-bh read-side critical section.
		 */
		rcu_read_lock_bh();
		neigh = __neigh_lookup_noref(tbl, addr, dev);
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	} else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);

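/* Example (not from this file): a hedged sketch of transmitting a
 * pre-built skb through the ARP table with neigh_xmit().  On the
 * resolved path this ends in neigh->output(); with NEIGH_LINK_TABLE it
 * just prepends the link-layer header and calls dev_queue_xmit().  The
 * function name, and the assumption that skb->protocol and the IPv4
 * next hop are already set up, are illustrative.
 */
#if 0	/* illustrative only */
static int xmit_via_arp(struct net_device *dev, __be32 nexthop,
			struct sk_buff *skb)
{
	/* neigh_xmit() frees the skb itself on error paths */
	return neigh_xmit(NEIGH_ARP_TABLE, dev, &nexthop, skb);
}
#endif
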
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

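/* Example (not from this file): a hedged sketch of how a protocol wires
 * these iterators into its own seq_file, in the style of the ARP /proc
 * code.  It assumes the file was opened with seq_open_net() sized for
 * struct neigh_seq_state; the show callback, ops name, and the %pM
 * (Ethernet-width) printing are illustrative.
 */
#if 0	/* illustrative only */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* NEIGH_SEQ_NEIGH_ONLY: skip proxy entries, so ->show only
	 * ever sees struct neighbour pointers.
	 */
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	if (v != SEQ_START_TOKEN)
		seq_printf(seq, "%pM\n", ((struct neighbour *)v)->ha);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= example_seq_show,
};
#endif
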
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;

		sf->private = PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= neigh_stat_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}

static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

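/* Example (not from this file): the packet<->byte scaling above, worked
 * through.  The legacy unres_qlen knob is kept in units of full-sized
 * frames while the backing variable is unres_qlen_bytes, so a write is
 * multiplied by SKB_TRUESIZE(ETH_FRAME_LEN) and a read divides it back;
 * writing unres_qlen_bytes directly can therefore read back truncated
 * through unres_qlen.  The variable names below are illustrative.
 */
#if 0	/* illustrative only */
	int pkts  = 101;				  /* value an admin writes */
	int bytes = pkts * SKB_TRUESIZE(ETH_FRAME_LEN);	  /* what gets stored */
	int back  = bytes / SKB_TRUESIZE(ETH_FRAME_LEN);  /* reads back as 101 */
#endif
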
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
			neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;

		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers update p->reachable_time as soon as
		 * base_reachable_time(_ms) is written, so the new interval
		 * takes effect on the next neighbour update instead of
		 * waiting for neigh_periodic_work to recompute it (which
		 * can take multiple minutes).  Any handler that replaces
		 * them should do the same.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

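/* Example (not from this file): a hedged sketch of how a protocol
 * registers its per-device neighbour sysctls, in the style of the
 * ARP/ND code.  A NULL handler keeps the default
 * neigh_proc_base_reachable_time() handlers installed above; a custom
 * handler overrides the retrans/base_reachable_time knobs and should
 * refresh p->reachable_time itself.  The function name is illustrative.
 */
#if 0	/* illustrative only */
static int example_register_parms(struct net_device *dev,
				  struct neigh_parms *parms)
{
	/* creates net/<family>/neigh/<dev>/... under /proc/sys */
	return neigh_sysctl_register(dev, parms, NULL);
}
#endif
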
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;

		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);