/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */

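/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file; example_pinned_use() is a hypothetical helper): the pattern the
 * rules above imply.  Hold tbl->lock only long enough to find the entry
 * and pin it with a reference, then drop the lock before doing anything
 * non-trivial.
 */
static void __maybe_unused example_pinned_use(struct neigh_table *tbl,
					      struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	/* ... scan the hash buckets and locate 'n' ... */
	neigh_hold(n);			/* pin the entry */
	write_unlock_bh(&tbl->lock);	/* release before real work */

	/* Non-trivial work may now take n->lock, talk to the driver, or
	 * send packets without risking the deadlocks described above.
	 */

	neigh_release(n);		/* drop the pin when done */
}
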
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * This is a random distribution over the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);

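/*
 * Worked example (an editor's addition, not part of the original file):
 * with a base of 30 seconds (base = 30 * HZ), neigh_rand_reach_time()
 * returns a value in [15 * HZ, 45 * HZ): (prandom_u32() % base) lies in
 * [0, 30 * HZ) and (base >> 1) adds the fixed 15 * HZ offset.  This is
 * how reachable_time is derived elsewhere in this file:
 *
 *	p->reachable_time =
 *		neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 */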

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   Destruction will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
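
/*
 * Note (an editor's addition): bucket indices are taken from the *top*
 * hash_shift bits of the 32-bit hash (hash >> (32 - shift)) rather than
 * the low bits, and every lookup site in this file uses the same
 * ">> (32 - nht->hash_shift)" idiom, so a grown table indexes keys
 * consistently with the hash function's best-mixed bits.
 */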

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!atomic_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
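
/*
 * Usage sketch (an editor's addition, not part of the original file;
 * example_lookup() is a hypothetical helper): a successful neigh_lookup()
 * returns the entry with its reference count raised, so every hit must
 * be paired with neigh_release().
 */
static void __maybe_unused example_lookup(struct neigh_table *tbl,
					  const void *pkey,
					  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		return;			/* not in the cache */
	/* ... inspect n->nud_state, n->ha (under n->lock), etc. ... */
	neigh_release(n);
}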

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
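
/*
 * Worked example (an editor's addition): for an IPv4 key the last four
 * bytes are the whole address.  Folding 192.168.0.1 (0xc0a80001):
 *
 *	0xc0a80001 ^ (0xc0a80001 >> 16) = 0xc0a8c0a9
 *	0xc0a8c0a9 ^ (0xc0a8c0a9 >>  8) = 0xc0686869
 *	0xc0686869 ^ (0xc0686869 >>  4) = 0xcc6eeeef
 *	0xcc6eeeef & PNEIGH_HASHMASK    = 0xf
 *
 * so that proxy entry lands in bucket 15 of the 16-bucket phash table.
 */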

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 * The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
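
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file; example_xmit() is a hypothetical helper): callers never pick a
 * path themselves.  They simply invoke the current hook, and
 * neigh_connect()/neigh_suspect() above decide whether that is the
 * connected fast path or the validating slow path (for example
 * neigh_resolve_output() further down).
 */
static int __maybe_unused example_xmit(struct neighbour *n,
				       struct sk_buff *skb)
{
	return n->output(n, skb);
}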

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	Periodically recompute ReachableTime from the random function.
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;

		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;

	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate place: report_unreachable is a complicated
	   routine, and in particular it can hit the same neighbour entry!

	   So we try to be accurate here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);

	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
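
/*
 * Note (an editor's addition): __neigh_event_send() returns 0 when the
 * entry is usable now (the caller may transmit immediately) and 1 when
 * resolution is pending or has failed (the skb, if any, was queued on
 * arp_queue or freed).  neigh_resolve_output() below relies on exactly
 * this contract via the neigh_event_send() wrapper.
 */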

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead)
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				(NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;

			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			       (neigh->flags | NTF_ROUTER) :
			       (neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
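
/*
 * Usage sketch (an editor's addition): an administrative teardown, as
 * issued by the netlink handler neigh_delete() later in this file.
 * NEIGH_UPDATE_F_ADMIN lets the change hit NOARP/PERMANENT entries and
 * NEIGH_UPDATE_F_OVERRIDE lets it replace a valid lladdr:
 *
 *	err = neigh_update(neigh, NULL, NUD_FAILED,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 */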

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
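
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file; example_ha_copy() is a hypothetical helper): the
 * read_seqbegin()/read_seqretry() loops above are the lockless-reader
 * half of neigh->ha_lock, with neigh_update() as the writer.  The same
 * pattern, extracted, copies a consistent hardware address without
 * blocking:
 */
static void __maybe_unused example_ha_copy(struct neighbour *n, u8 *dst)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, n->dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}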

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
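
/*
 * Usage sketch (an editor's addition): each protocol registers its table
 * once at boot with a fixed slot index, e.g. IPv4 ARP does
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and tears it down (on module unload) with neigh_table_clear() below.
 */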
1547
1548int neigh_table_clear(int index, struct neigh_table *tbl)
1549{
1550 neigh_tables[index] = NULL;
1551 /* It is not clean... Fix it to unload IPv6 module safely */
1552 cancel_delayed_work_sync(&tbl->gc_work);
1553 del_timer_sync(&tbl->proxy_timer);
1554 pneigh_queue_purge(&tbl->proxy_queue);
1555 neigh_ifdown(tbl, NULL);
1556 if (atomic_read(&tbl->entries))
1557 pr_crit("neighbour leakage\n");
1558
1559 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1560 neigh_hash_free_rcu);
1561 tbl->nht = NULL;
1562
1563 kfree(tbl->phash_buckets);
1564 tbl->phash_buckets = NULL;
1565
1566 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1567
1568 free_percpu(tbl->stats);
1569 tbl->stats = NULL;
1570
1571 return 0;
1572}
1573EXPORT_SYMBOL(neigh_table_clear);
1574
1575static struct neigh_table *neigh_find_table(int family)
1576{
1577 struct neigh_table *tbl = NULL;
1578
1579 switch (family) {
1580 case AF_INET:
1581 tbl = neigh_tables[NEIGH_ARP_TABLE];
1582 break;
1583 case AF_INET6:
1584 tbl = neigh_tables[NEIGH_ND_TABLE];
1585 break;
1586 case AF_DECnet:
1587 tbl = neigh_tables[NEIGH_DN_TABLE];
1588 break;
1589 }
1590
1591 return tbl;
1592}
1593
1594static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1595{
1596 struct net *net = sock_net(skb->sk);
1597 struct ndmsg *ndm;
1598 struct nlattr *dst_attr;
1599 struct neigh_table *tbl;
1600 struct neighbour *neigh;
1601 struct net_device *dev = NULL;
1602 int err = -EINVAL;
1603
1604 ASSERT_RTNL();
1605 if (nlmsg_len(nlh) < sizeof(*ndm))
1606 goto out;
1607
1608 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1609 if (dst_attr == NULL)
1610 goto out;
1611
1612 ndm = nlmsg_data(nlh);
1613 if (ndm->ndm_ifindex) {
1614 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1615 if (dev == NULL) {
1616 err = -ENODEV;
1617 goto out;
1618 }
1619 }
1620
1621 tbl = neigh_find_table(ndm->ndm_family);
1622 if (tbl == NULL)
1623 return -EAFNOSUPPORT;
1624
1625 if (nla_len(dst_attr) < tbl->key_len)
1626 goto out;
1627
1628 if (ndm->ndm_flags & NTF_PROXY) {
1629 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1630 goto out;
1631 }
1632
1633 if (dev == NULL)
1634 goto out;
1635
1636 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1637 if (neigh == NULL) {
1638 err = -ENOENT;
1639 goto out;
1640 }
1641
1642 err = neigh_update(neigh, NULL, NUD_FAILED,
1643 NEIGH_UPDATE_F_OVERRIDE |
1644 NEIGH_UPDATE_F_ADMIN);
1645 neigh_release(neigh);
1646
1647out:
1648 return err;
1649}
1650
1651static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1652{
1653 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1654 struct net *net = sock_net(skb->sk);
1655 struct ndmsg *ndm;
1656 struct nlattr *tb[NDA_MAX+1];
1657 struct neigh_table *tbl;
1658 struct net_device *dev = NULL;
1659 struct neighbour *neigh;
1660 void *dst, *lladdr;
1661 int err;
1662
1663 ASSERT_RTNL();
1664 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1665 if (err < 0)
1666 goto out;
1667
1668 err = -EINVAL;
1669 if (tb[NDA_DST] == NULL)
1670 goto out;
1671
1672 ndm = nlmsg_data(nlh);
1673 if (ndm->ndm_ifindex) {
1674 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1675 if (dev == NULL) {
1676 err = -ENODEV;
1677 goto out;
1678 }
1679
1680 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1681 goto out;
1682 }
1683
1684 tbl = neigh_find_table(ndm->ndm_family);
1685 if (tbl == NULL)
1686 return -EAFNOSUPPORT;
1687
1688 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1689 goto out;
1690 dst = nla_data(tb[NDA_DST]);
1691 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1692
1693 if (ndm->ndm_flags & NTF_PROXY) {
1694 struct pneigh_entry *pn;
1695
1696 err = -ENOBUFS;
1697 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1698 if (pn) {
1699 pn->flags = ndm->ndm_flags;
1700 err = 0;
1701 }
1702 goto out;
1703 }
1704
1705 if (dev == NULL)
1706 goto out;
1707
1708 neigh = neigh_lookup(tbl, dst, dev);
1709 if (neigh == NULL) {
1710 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1711 err = -ENOENT;
1712 goto out;
1713 }
1714
1715 neigh = __neigh_lookup_errno(tbl, dst, dev);
1716 if (IS_ERR(neigh)) {
1717 err = PTR_ERR(neigh);
1718 goto out;
1719 }
1720 } else {
1721 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1722 err = -EEXIST;
1723 neigh_release(neigh);
1724 goto out;
1725 }
1726
1727 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1728 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1729 }
1730
1731 if (ndm->ndm_flags & NTF_USE) {
1732 neigh_event_send(neigh, NULL);
1733 err = 0;
1734 } else
1735 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1736 neigh_release(neigh);
1737
1738out:
1739 return err;
1740}
1741
1742static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1743{
1744 struct nlattr *nest;
1745
1746 nest = nla_nest_start(skb, NDTA_PARMS);
1747 if (nest == NULL)
1748 return -ENOBUFS;
1749
1750 if ((parms->dev &&
1751 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1752 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1753 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1754 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1755 /* approximative value for deprecated QUEUE_LEN (in packets) */
1756 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1757 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1758 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1759 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1760 nla_put_u32(skb, NDTPA_UCAST_PROBES,
1761 NEIGH_VAR(parms, UCAST_PROBES)) ||
1762 nla_put_u32(skb, NDTPA_MCAST_PROBES,
1763 NEIGH_VAR(parms, MCAST_PROBES)) ||
1764 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1765 NEIGH_VAR(parms, MCAST_REPROBES)) ||
1766 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1767 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1768 NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1769 nla_put_msecs(skb, NDTPA_GC_STALETIME,
1770 NEIGH_VAR(parms, GC_STALETIME)) ||
1771 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1772 NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1773 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1774 NEIGH_VAR(parms, RETRANS_TIME)) ||
1775 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1776 NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1777 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1778 NEIGH_VAR(parms, PROXY_DELAY)) ||
1779 nla_put_msecs(skb, NDTPA_LOCKTIME,
1780 NEIGH_VAR(parms, LOCKTIME)))
1781 goto nla_put_failure;
1782 return nla_nest_end(skb, nest);
1783
1784nla_put_failure:
1785 nla_nest_cancel(skb, nest);
1786 return -EMSGSIZE;
1787}
1788
1789static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1790 u32 pid, u32 seq, int type, int flags)
1791{
1792 struct nlmsghdr *nlh;
1793 struct ndtmsg *ndtmsg;
1794
1795 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1796 if (nlh == NULL)
1797 return -EMSGSIZE;
1798
1799 ndtmsg = nlmsg_data(nlh);
1800
1801 read_lock_bh(&tbl->lock);
1802 ndtmsg->ndtm_family = tbl->family;
1803 ndtmsg->ndtm_pad1 = 0;
1804 ndtmsg->ndtm_pad2 = 0;
1805
1806 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1807 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1808 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1809 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1810 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1811 goto nla_put_failure;
1812 {
1813 unsigned long now = jiffies;
1814 unsigned int flush_delta = now - tbl->last_flush;
1815 unsigned int rand_delta = now - tbl->last_rand;
1816 struct neigh_hash_table *nht;
1817 struct ndt_config ndc = {
1818 .ndtc_key_len = tbl->key_len,
1819 .ndtc_entry_size = tbl->entry_size,
1820 .ndtc_entries = atomic_read(&tbl->entries),
1821 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1822 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1823 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1824 };
1825
1826 rcu_read_lock_bh();
1827 nht = rcu_dereference_bh(tbl->nht);
1828 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1829 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1830 rcu_read_unlock_bh();
1831
1832 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1833 goto nla_put_failure;
1834 }
1835
1836 {
1837 int cpu;
1838 struct ndt_stats ndst;
1839
1840 memset(&ndst, 0, sizeof(ndst));
1841
1842 for_each_possible_cpu(cpu) {
1843 struct neigh_statistics *st;
1844
1845 st = per_cpu_ptr(tbl->stats, cpu);
1846 ndst.ndts_allocs += st->allocs;
1847 ndst.ndts_destroys += st->destroys;
1848 ndst.ndts_hash_grows += st->hash_grows;
1849 ndst.ndts_res_failed += st->res_failed;
1850 ndst.ndts_lookups += st->lookups;
1851 ndst.ndts_hits += st->hits;
1852 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1853 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1854 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1855 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1856 ndst.ndts_table_fulls += st->table_fulls;
1857 }
1858
1859 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1860 goto nla_put_failure;
1861 }
1862
1863 BUG_ON(tbl->parms.dev);
1864 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1865 goto nla_put_failure;
1866
1867 read_unlock_bh(&tbl->lock);
1868 nlmsg_end(skb, nlh);
1869 return 0;
1870
1871nla_put_failure:
1872 read_unlock_bh(&tbl->lock);
1873 nlmsg_cancel(skb, nlh);
1874 return -EMSGSIZE;
1875}
1876
1877static int neightbl_fill_param_info(struct sk_buff *skb,
1878 struct neigh_table *tbl,
1879 struct neigh_parms *parms,
1880 u32 pid, u32 seq, int type,
1881 unsigned int flags)
1882{
1883 struct ndtmsg *ndtmsg;
1884 struct nlmsghdr *nlh;
1885
1886 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1887 if (nlh == NULL)
1888 return -EMSGSIZE;
1889
1890 ndtmsg = nlmsg_data(nlh);
1891
1892 read_lock_bh(&tbl->lock);
1893 ndtmsg->ndtm_family = tbl->family;
1894 ndtmsg->ndtm_pad1 = 0;
1895 ndtmsg->ndtm_pad2 = 0;
1896
1897 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1898 neightbl_fill_parms(skb, parms) < 0)
1899 goto errout;
1900
1901 read_unlock_bh(&tbl->lock);
1902 nlmsg_end(skb, nlh);
1903 return 0;
1904errout:
1905 read_unlock_bh(&tbl->lock);
1906 nlmsg_cancel(skb, nlh);
1907 return -EMSGSIZE;
1908}
1909
1910static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1911 [NDTA_NAME] = { .type = NLA_STRING },
1912 [NDTA_THRESH1] = { .type = NLA_U32 },
1913 [NDTA_THRESH2] = { .type = NLA_U32 },
1914 [NDTA_THRESH3] = { .type = NLA_U32 },
1915 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1916 [NDTA_PARMS] = { .type = NLA_NESTED },
1917};
1918
1919static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1920 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1921 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1922 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1923 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1924 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1925 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1926 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
1927 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1928 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1929 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1930 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1931 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1932 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1933 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1934};
1935
1936static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1937{
1938 struct net *net = sock_net(skb->sk);
1939 struct neigh_table *tbl;
1940 struct ndtmsg *ndtmsg;
1941 struct nlattr *tb[NDTA_MAX+1];
1942 bool found = false;
1943 int err, tidx;
1944
1945 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1946 nl_neightbl_policy);
1947 if (err < 0)
1948 goto errout;
1949
1950 if (tb[NDTA_NAME] == NULL) {
1951 err = -EINVAL;
1952 goto errout;
1953 }
1954
1955 ndtmsg = nlmsg_data(nlh);
1956
1957 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1958 tbl = neigh_tables[tidx];
1959 if (!tbl)
1960 continue;
1961 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1962 continue;
1963 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1964 found = true;
1965 break;
1966 }
1967 }
1968
1969 if (!found)
1970 return -ENOENT;
1971
1972 /*
1973 * We acquire tbl->lock to be nice to the periodic timers and
1974 * make sure they always see a consistent set of values.
1975 */
1976 write_lock_bh(&tbl->lock);
1977
1978 if (tb[NDTA_PARMS]) {
1979 struct nlattr *tbp[NDTPA_MAX+1];
1980 struct neigh_parms *p;
1981 int i, ifindex = 0;
1982
1983 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1984 nl_ntbl_parm_policy);
1985 if (err < 0)
1986 goto errout_tbl_lock;
1987
1988 if (tbp[NDTPA_IFINDEX])
1989 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1990
1991 p = lookup_neigh_parms(tbl, net, ifindex);
1992 if (p == NULL) {
1993 err = -ENOENT;
1994 goto errout_tbl_lock;
1995 }
1996
1997 for (i = 1; i <= NDTPA_MAX; i++) {
1998 if (tbp[i] == NULL)
1999 continue;
2000
2001 switch (i) {
2002 case NDTPA_QUEUE_LEN:
2003 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2004 nla_get_u32(tbp[i]) *
2005 SKB_TRUESIZE(ETH_FRAME_LEN));
2006 break;
2007 case NDTPA_QUEUE_LENBYTES:
2008 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2009 nla_get_u32(tbp[i]));
2010 break;
2011 case NDTPA_PROXY_QLEN:
2012 NEIGH_VAR_SET(p, PROXY_QLEN,
2013 nla_get_u32(tbp[i]));
2014 break;
2015 case NDTPA_APP_PROBES:
2016 NEIGH_VAR_SET(p, APP_PROBES,
2017 nla_get_u32(tbp[i]));
2018 break;
2019 case NDTPA_UCAST_PROBES:
2020 NEIGH_VAR_SET(p, UCAST_PROBES,
2021 nla_get_u32(tbp[i]));
2022 break;
2023 case NDTPA_MCAST_PROBES:
2024 NEIGH_VAR_SET(p, MCAST_PROBES,
2025 nla_get_u32(tbp[i]));
2026 break;
2027 case NDTPA_MCAST_REPROBES:
2028 NEIGH_VAR_SET(p, MCAST_REPROBES,
2029 nla_get_u32(tbp[i]));
2030 break;
2031 case NDTPA_BASE_REACHABLE_TIME:
2032 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2033 nla_get_msecs(tbp[i]));
2034 /* update reachable_time as well, otherwise, the change will
2035 * only be effective after the next time neigh_periodic_work
2036 * decides to recompute it (can be multiple minutes)
2037 */
2038 p->reachable_time =
2039 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2040 break;
2041 case NDTPA_GC_STALETIME:
2042 NEIGH_VAR_SET(p, GC_STALETIME,
2043 nla_get_msecs(tbp[i]));
2044 break;
2045 case NDTPA_DELAY_PROBE_TIME:
2046 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2047 nla_get_msecs(tbp[i]));
2048 break;
2049 case NDTPA_RETRANS_TIME:
2050 NEIGH_VAR_SET(p, RETRANS_TIME,
2051 nla_get_msecs(tbp[i]));
2052 break;
2053 case NDTPA_ANYCAST_DELAY:
2054 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055 nla_get_msecs(tbp[i]));
2056 break;
2057 case NDTPA_PROXY_DELAY:
2058 NEIGH_VAR_SET(p, PROXY_DELAY,
2059 nla_get_msecs(tbp[i]));
2060 break;
2061 case NDTPA_LOCKTIME:
2062 NEIGH_VAR_SET(p, LOCKTIME,
2063 nla_get_msecs(tbp[i]));
2064 break;
2065 }
2066 }
2067 }
2068
2069 err = -ENOENT;
2070 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072 !net_eq(net, &init_net))
2073 goto errout_tbl_lock;
2074
2075 if (tb[NDTA_THRESH1])
2076 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078 if (tb[NDTA_THRESH2])
2079 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081 if (tb[NDTA_THRESH3])
2082 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084 if (tb[NDTA_GC_INTERVAL])
2085 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087 err = 0;
2088
2089errout_tbl_lock:
2090 write_unlock_bh(&tbl->lock);
2091errout:
2092 return err;
2093}
2094
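/* Dump every neighbour table matching the requested family.  The table's
 * default parms are carried in the table record itself; the loop below
 * additionally emits one record per per-device parms.  cb->args[0] (table
 * index) and cb->args[1] (parms index) act as resume cookies across dump
 * continuations.
 */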
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097 struct net *net = sock_net(skb->sk);
2098 int family, tidx, nidx = 0;
2099 int tbl_skip = cb->args[0];
2100 int neigh_skip = cb->args[1];
2101 struct neigh_table *tbl;
2102
2103 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106 struct neigh_parms *p;
2107
2108 tbl = neigh_tables[tidx];
2109 if (!tbl)
2110 continue;
2111
2112 if (tidx < tbl_skip || (family && tbl->family != family))
2113 continue;
2114
2115 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117 NLM_F_MULTI) < 0)
2118 break;
2119
2120 nidx = 0;
2121 p = list_next_entry(&tbl->parms, list);
2122 list_for_each_entry_from(p, &tbl->parms_list, list) {
2123 if (!net_eq(neigh_parms_net(p), net))
2124 continue;
2125
2126 if (nidx < neigh_skip)
2127 goto next;
2128
2129 if (neightbl_fill_param_info(skb, tbl, p,
2130 NETLINK_CB(cb->skb).portid,
2131 cb->nlh->nlmsg_seq,
2132 RTM_NEWNEIGHTBL,
2133 NLM_F_MULTI) < 0)
2134 goto out;
2135 next:
2136 nidx++;
2137 }
2138
2139 neigh_skip = 0;
2140 }
2141out:
2142 cb->args[0] = tidx;
2143 cb->args[1] = nidx;
2144
2145 return skb->len;
2146}
2147
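/* Build one neighbour message (RTM_NEWNEIGH/RTM_DELNEIGH) for @neigh.
 * neigh->lock is held while the volatile fields (nud_state, link-layer
 * address, cache timestamps) are snapshotted, so the message is
 * internally consistent.
 */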
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149 u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151 unsigned long now = jiffies;
2152 struct nda_cacheinfo ci;
2153 struct nlmsghdr *nlh;
2154 struct ndmsg *ndm;
2155
2156 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157 if (nlh == NULL)
2158 return -EMSGSIZE;
2159
2160 ndm = nlmsg_data(nlh);
2161 ndm->ndm_family = neigh->ops->family;
2162 ndm->ndm_pad1 = 0;
2163 ndm->ndm_pad2 = 0;
2164 ndm->ndm_flags = neigh->flags;
2165 ndm->ndm_type = neigh->type;
2166 ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169 goto nla_put_failure;
2170
2171 read_lock_bh(&neigh->lock);
2172 ndm->ndm_state = neigh->nud_state;
2173 if (neigh->nud_state & NUD_VALID) {
2174 char haddr[MAX_ADDR_LEN];
2175
2176 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178 read_unlock_bh(&neigh->lock);
2179 goto nla_put_failure;
2180 }
2181 }
2182
2183 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2184 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2186 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2187 read_unlock_bh(&neigh->lock);
2188
2189 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191 goto nla_put_failure;
2192
2193 nlmsg_end(skb, nlh);
2194 return 0;
2195
2196nla_put_failure:
2197 nlmsg_cancel(skb, nlh);
2198 return -EMSGSIZE;
2199}
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202 u32 pid, u32 seq, int type, unsigned int flags,
2203 struct neigh_table *tbl)
2204{
2205 struct nlmsghdr *nlh;
2206 struct ndmsg *ndm;
2207
2208 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209 if (nlh == NULL)
2210 return -EMSGSIZE;
2211
2212 ndm = nlmsg_data(nlh);
2213 ndm->ndm_family = tbl->family;
2214 ndm->ndm_pad1 = 0;
2215 ndm->ndm_pad2 = 0;
2216 ndm->ndm_flags = pn->flags | NTF_PROXY;
2217 ndm->ndm_type = RTN_UNICAST;
2218 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219 ndm->ndm_state = NUD_NONE;
2220
2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222 goto nla_put_failure;
2223
2224 nlmsg_end(skb, nlh);
2225 return 0;
2226
2227nla_put_failure:
2228 nlmsg_cancel(skb, nlh);
2229 return -EMSGSIZE;
2230}
2231
2232static void neigh_update_notify(struct neighbour *neigh)
2233{
2234 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2235 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2236}
2237
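/* Return true if @dev must be skipped because its master device does not
 * match the ifindex the dump was asked to filter on; a zero @master_idx
 * means no master filter was requested.
 */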
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240 struct net_device *master;
2241
2242 if (!master_idx)
2243 return false;
2244
2245 master = netdev_master_upper_dev_get(dev);
2246 if (!master || master->ifindex != master_idx)
2247 return true;
2248
2249 return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254 if (filter_idx && dev->ifindex != filter_idx)
2255 return true;
2256
2257 return false;
2258}
2259
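/* Walk one table's hash buckets under rcu_read_lock_bh() and emit an
 * RTM_NEWNEIGH record for every entry that passes the namespace,
 * NDA_IFINDEX and NDA_MASTER filters.  cb->args[1] (bucket) and
 * cb->args[2] (index within the bucket) let an interrupted dump resume
 * where it left off.
 */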
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261 struct netlink_callback *cb)
2262{
2263 struct net *net = sock_net(skb->sk);
2264 const struct nlmsghdr *nlh = cb->nlh;
2265 struct nlattr *tb[NDA_MAX + 1];
2266 struct neighbour *n;
2267 int rc, h, s_h = cb->args[1];
2268 int idx, s_idx = idx = cb->args[2];
2269 struct neigh_hash_table *nht;
2270 int filter_master_idx = 0, filter_idx = 0;
2271 unsigned int flags = NLM_F_MULTI;
2272 int err;
2273
2274 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275 if (!err) {
2276 if (tb[NDA_IFINDEX])
2277 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279 if (tb[NDA_MASTER])
2280 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282 if (filter_idx || filter_master_idx)
2283 flags |= NLM_F_DUMP_FILTERED;
2284 }
2285
2286 rcu_read_lock_bh();
2287 nht = rcu_dereference_bh(tbl->nht);
2288
2289 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290 if (h > s_h)
2291 s_idx = 0;
2292 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293 n != NULL;
2294 n = rcu_dereference_bh(n->next)) {
2295 if (!net_eq(dev_net(n->dev), net))
2296 continue;
2297 if (neigh_ifindex_filtered(n->dev, filter_idx))
2298 continue;
2299 if (neigh_master_filtered(n->dev, filter_master_idx))
2300 continue;
2301 if (idx < s_idx)
2302 goto next;
2303 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2304 cb->nlh->nlmsg_seq,
2305 RTM_NEWNEIGH,
2306 flags) < 0) {
2307 rc = -1;
2308 goto out;
2309 }
2310next:
2311 idx++;
2312 }
2313 }
2314 rc = skb->len;
2315out:
2316 rcu_read_unlock_bh();
2317 cb->args[1] = h;
2318 cb->args[2] = idx;
2319 return rc;
2320}
2321
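/* Proxy-entry counterpart of neigh_dump_table(): the pneigh hash is
 * protected by tbl->lock (taken for reading) instead of RCU, and the
 * resume cookies live in cb->args[3] and cb->args[4].
 */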
2322static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2323 struct netlink_callback *cb)
2324{
2325 struct pneigh_entry *n;
2326 struct net *net = sock_net(skb->sk);
2327 int rc, h, s_h = cb->args[3];
2328 int idx, s_idx = idx = cb->args[4];
2329
2330 read_lock_bh(&tbl->lock);
2331
2332 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2333 if (h > s_h)
2334 s_idx = 0;
2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336 if (pneigh_net(n) != net)
2337 continue;
2338 if (idx < s_idx)
2339 goto next;
2340 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2341 cb->nlh->nlmsg_seq,
2342 RTM_NEWNEIGH,
2343 NLM_F_MULTI, tbl) < 0) {
2344 read_unlock_bh(&tbl->lock);
2345 rc = -1;
2346 goto out;
2347 }
2348 next:
2349 idx++;
2350 }
2351 }
2352
2353 read_unlock_bh(&tbl->lock);
2354 rc = skb->len;
2355out:
2356 cb->args[3] = h;
2357 cb->args[4] = idx;
2358 return rc;
2360}
2361
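/* RTM_GETNEIGH dump entry point.  A request carrying a full ndmsg with
 * ndm_flags == NTF_PROXY selects the proxy table; otherwise the regular
 * neighbour entries are dumped.
 */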
2362static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2363{
2364 struct neigh_table *tbl;
2365 int t, family, s_t;
2366 int proxy = 0;
2367 int err;
2368
2369 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2370
2371	/* Check for the presence of a full ndmsg structure; the family
2372	 * member sits at the same offset in both structures.
2373	 */
2374 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2375 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2376 proxy = 1;
2377
2378 s_t = cb->args[0];
2379
2380 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2381 tbl = neigh_tables[t];
2382
2383 if (!tbl)
2384 continue;
2385 if (t < s_t || (family && tbl->family != family))
2386 continue;
2387 if (t > s_t)
2388 memset(&cb->args[1], 0, sizeof(cb->args) -
2389 sizeof(cb->args[0]));
2390 if (proxy)
2391 err = pneigh_dump_table(tbl, skb, cb);
2392 else
2393 err = neigh_dump_table(tbl, skb, cb);
2394 if (err < 0)
2395 break;
2396 }
2397
2398 cb->args[0] = t;
2399 return skb->len;
2400}
2401
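/* Invoke @cb for every neighbour in @tbl.  The table lock is held for
 * reading to keep the hash table from being resized, so @cb must not
 * sleep and must not take tbl->lock itself.  A minimal, hypothetical
 * caller might look like:
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *	...
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_entry, &count);
 */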
2402void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2403{
2404 int chain;
2405 struct neigh_hash_table *nht;
2406
2407 rcu_read_lock_bh();
2408 nht = rcu_dereference_bh(tbl->nht);
2409
2410 read_lock(&tbl->lock); /* avoid resizes */
2411 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2412 struct neighbour *n;
2413
2414 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2415 n != NULL;
2416 n = rcu_dereference_bh(n->next))
2417 cb(n, cookie);
2418 }
2419 read_unlock(&tbl->lock);
2420 rcu_read_unlock_bh();
2421}
2422EXPORT_SYMBOL(neigh_for_each);
2423
2424/* The tbl->lock must be held as a writer and BH disabled. */
2425void __neigh_for_each_release(struct neigh_table *tbl,
2426 int (*cb)(struct neighbour *))
2427{
2428 int chain;
2429 struct neigh_hash_table *nht;
2430
2431 nht = rcu_dereference_protected(tbl->nht,
2432 lockdep_is_held(&tbl->lock));
2433 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2434 struct neighbour *n;
2435 struct neighbour __rcu **np;
2436
2437 np = &nht->hash_buckets[chain];
2438 while ((n = rcu_dereference_protected(*np,
2439 lockdep_is_held(&tbl->lock))) != NULL) {
2440 int release;
2441
2442 write_lock(&n->lock);
2443 release = cb(n);
2444 if (release) {
2445 rcu_assign_pointer(*np,
2446 rcu_dereference_protected(n->next,
2447 lockdep_is_held(&tbl->lock)));
2448 n->dead = 1;
2449 } else
2450 np = &n->next;
2451 write_unlock(&n->lock);
2452 if (release)
2453 neigh_cleanup_and_release(n);
2454 }
2455 }
2456}
2457EXPORT_SYMBOL(__neigh_for_each_release);
2458
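/* Transmit @skb to @addr using the table selected by @index (e.g.
 * NEIGH_ARP_TABLE), creating a neighbour entry on demand and handing the
 * skb to its output function.  NEIGH_LINK_TABLE bypasses resolution and
 * builds the hard header directly.  On entry-creation or header-build
 * failure the skb is freed.
 */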
2459int neigh_xmit(int index, struct net_device *dev,
2460 const void *addr, struct sk_buff *skb)
2461{
2462 int err = -EAFNOSUPPORT;
2463 if (likely(index < NEIGH_NR_TABLES)) {
2464 struct neigh_table *tbl;
2465 struct neighbour *neigh;
2466
2467 tbl = neigh_tables[index];
2468 if (!tbl)
2469 goto out;
2470 neigh = __neigh_lookup_noref(tbl, addr, dev);
2471 if (!neigh)
2472 neigh = __neigh_create(tbl, addr, dev, false);
2473 err = PTR_ERR(neigh);
2474 if (IS_ERR(neigh))
2475 goto out_kfree_skb;
2476 err = neigh->output(neigh, skb);
2477	} else if (index == NEIGH_LINK_TABLE) {
2479 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2480 addr, NULL, skb->len);
2481 if (err < 0)
2482 goto out_kfree_skb;
2483 err = dev_queue_xmit(skb);
2484 }
2485out:
2486 return err;
2487out_kfree_skb:
2488 kfree_skb(skb);
2489 goto out;
2490}
2491EXPORT_SYMBOL(neigh_xmit);
2492
2493#ifdef CONFIG_PROC_FS
2494
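/* seq_file iteration over a neighbour table: regular hash buckets first,
 * then (unless NEIGH_SEQ_NEIGH_ONLY) the proxy buckets.  state->bucket
 * and the NEIGH_SEQ_IS_PNEIGH flag record the current position.
 */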
2495static struct neighbour *neigh_get_first(struct seq_file *seq)
2496{
2497 struct neigh_seq_state *state = seq->private;
2498 struct net *net = seq_file_net(seq);
2499 struct neigh_hash_table *nht = state->nht;
2500 struct neighbour *n = NULL;
2501 int bucket = state->bucket;
2502
2503 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2504 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2505 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2506
2507 while (n) {
2508 if (!net_eq(dev_net(n->dev), net))
2509 goto next;
2510 if (state->neigh_sub_iter) {
2511 loff_t fakep = 0;
2512 void *v;
2513
2514 v = state->neigh_sub_iter(state, n, &fakep);
2515 if (!v)
2516 goto next;
2517 }
2518 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2519 break;
2520 if (n->nud_state & ~NUD_NOARP)
2521 break;
2522next:
2523 n = rcu_dereference_bh(n->next);
2524 }
2525
2526 if (n)
2527 break;
2528 }
2529 state->bucket = bucket;
2530
2531 return n;
2532}
2533
2534static struct neighbour *neigh_get_next(struct seq_file *seq,
2535 struct neighbour *n,
2536 loff_t *pos)
2537{
2538 struct neigh_seq_state *state = seq->private;
2539 struct net *net = seq_file_net(seq);
2540 struct neigh_hash_table *nht = state->nht;
2541
2542 if (state->neigh_sub_iter) {
2543 void *v = state->neigh_sub_iter(state, n, pos);
2544 if (v)
2545 return n;
2546 }
2547 n = rcu_dereference_bh(n->next);
2548
2549 while (1) {
2550 while (n) {
2551 if (!net_eq(dev_net(n->dev), net))
2552 goto next;
2553 if (state->neigh_sub_iter) {
2554 void *v = state->neigh_sub_iter(state, n, pos);
2555 if (v)
2556 return n;
2557 goto next;
2558 }
2559 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2560 break;
2561
2562 if (n->nud_state & ~NUD_NOARP)
2563 break;
2564next:
2565 n = rcu_dereference_bh(n->next);
2566 }
2567
2568 if (n)
2569 break;
2570
2571 if (++state->bucket >= (1 << nht->hash_shift))
2572 break;
2573
2574 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2575 }
2576
2577 if (n && pos)
2578 --(*pos);
2579 return n;
2580}
2581
2582static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2583{
2584 struct neighbour *n = neigh_get_first(seq);
2585
2586 if (n) {
2587 --(*pos);
2588 while (*pos) {
2589 n = neigh_get_next(seq, n, pos);
2590 if (!n)
2591 break;
2592 }
2593 }
2594 return *pos ? NULL : n;
2595}
2596
2597static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2598{
2599 struct neigh_seq_state *state = seq->private;
2600 struct net *net = seq_file_net(seq);
2601 struct neigh_table *tbl = state->tbl;
2602 struct pneigh_entry *pn = NULL;
2603 int bucket = state->bucket;
2604
2605 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2606 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2607 pn = tbl->phash_buckets[bucket];
2608 while (pn && !net_eq(pneigh_net(pn), net))
2609 pn = pn->next;
2610 if (pn)
2611 break;
2612 }
2613 state->bucket = bucket;
2614
2615 return pn;
2616}
2617
2618static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2619 struct pneigh_entry *pn,
2620 loff_t *pos)
2621{
2622 struct neigh_seq_state *state = seq->private;
2623 struct net *net = seq_file_net(seq);
2624 struct neigh_table *tbl = state->tbl;
2625
2626 do {
2627 pn = pn->next;
2628 } while (pn && !net_eq(pneigh_net(pn), net));
2629
2630 while (!pn) {
2631 if (++state->bucket > PNEIGH_HASHMASK)
2632 break;
2633 pn = tbl->phash_buckets[state->bucket];
2634 while (pn && !net_eq(pneigh_net(pn), net))
2635 pn = pn->next;
2636 if (pn)
2637 break;
2638 }
2639
2640 if (pn && pos)
2641 --(*pos);
2642
2643 return pn;
2644}
2645
2646static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2647{
2648 struct pneigh_entry *pn = pneigh_get_first(seq);
2649
2650 if (pn) {
2651 --(*pos);
2652 while (*pos) {
2653 pn = pneigh_get_next(seq, pn, pos);
2654 if (!pn)
2655 break;
2656 }
2657 }
2658 return *pos ? NULL : pn;
2659}
2660
2661static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2662{
2663 struct neigh_seq_state *state = seq->private;
2664 void *rc;
2665 loff_t idxpos = *pos;
2666
2667 rc = neigh_get_idx(seq, &idxpos);
2668 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2669 rc = pneigh_get_idx(seq, &idxpos);
2670
2671 return rc;
2672}
2673
2674void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2675 __acquires(rcu_bh)
2676{
2677 struct neigh_seq_state *state = seq->private;
2678
2679 state->tbl = tbl;
2680 state->bucket = 0;
2681 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2682
2683 rcu_read_lock_bh();
2684 state->nht = rcu_dereference_bh(tbl->nht);
2685
2686 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2687}
2688EXPORT_SYMBOL(neigh_seq_start);
2689
2690void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2691{
2692 struct neigh_seq_state *state;
2693 void *rc;
2694
2695 if (v == SEQ_START_TOKEN) {
2696 rc = neigh_get_first(seq);
2697 goto out;
2698 }
2699
2700 state = seq->private;
2701 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2702 rc = neigh_get_next(seq, v, NULL);
2703 if (rc)
2704 goto out;
2705 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2706 rc = pneigh_get_first(seq);
2707 } else {
2708 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2709 rc = pneigh_get_next(seq, v, NULL);
2710 }
2711out:
2712 ++(*pos);
2713 return rc;
2714}
2715EXPORT_SYMBOL(neigh_seq_next);
2716
2717void neigh_seq_stop(struct seq_file *seq, void *v)
2718 __releases(rcu_bh)
2719{
2720 rcu_read_unlock_bh();
2721}
2722EXPORT_SYMBOL(neigh_seq_stop);
2723
2724/* statistics via seq_file */
2725
2726static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2727{
2728 struct neigh_table *tbl = seq->private;
2729 int cpu;
2730
2731 if (*pos == 0)
2732 return SEQ_START_TOKEN;
2733
2734 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2735 if (!cpu_possible(cpu))
2736 continue;
2737 *pos = cpu+1;
2738 return per_cpu_ptr(tbl->stats, cpu);
2739 }
2740 return NULL;
2741}
2742
2743static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2744{
2745 struct neigh_table *tbl = seq->private;
2746 int cpu;
2747
2748 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2749 if (!cpu_possible(cpu))
2750 continue;
2751 *pos = cpu+1;
2752 return per_cpu_ptr(tbl->stats, cpu);
2753 }
2754 return NULL;
2755}
2756
2757static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2758{
2759
2760}
2761
2762static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2763{
2764 struct neigh_table *tbl = seq->private;
2765 struct neigh_statistics *st = v;
2766
2767 if (v == SEQ_START_TOKEN) {
2768 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2769 return 0;
2770 }
2771
2772 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2773 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
2774 atomic_read(&tbl->entries),
2775
2776 st->allocs,
2777 st->destroys,
2778 st->hash_grows,
2779
2780 st->lookups,
2781 st->hits,
2782
2783 st->res_failed,
2784
2785 st->rcv_probes_mcast,
2786 st->rcv_probes_ucast,
2787
2788 st->periodic_gc_runs,
2789 st->forced_gc_runs,
2790 st->unres_discards,
2791 st->table_fulls
2792 );
2793
2794 return 0;
2795}
2796
2797static const struct seq_operations neigh_stat_seq_ops = {
2798 .start = neigh_stat_seq_start,
2799 .next = neigh_stat_seq_next,
2800 .stop = neigh_stat_seq_stop,
2801 .show = neigh_stat_seq_show,
2802};
2803
2804static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2805{
2806 int ret = seq_open(file, &neigh_stat_seq_ops);
2807
2808 if (!ret) {
2809 struct seq_file *sf = file->private_data;
2810 sf->private = PDE_DATA(inode);
2811 }
2812 return ret;
2813}
2814
2815static const struct file_operations neigh_stat_seq_fops = {
2816 .owner = THIS_MODULE,
2817 .open = neigh_stat_seq_open,
2818 .read = seq_read,
2819 .llseek = seq_lseek,
2820 .release = seq_release,
2821};
2822
2823#endif /* CONFIG_PROC_FS */
2824
2825static inline size_t neigh_nlmsg_size(void)
2826{
2827 return NLMSG_ALIGN(sizeof(struct ndmsg))
2828 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2829 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2830 + nla_total_size(sizeof(struct nda_cacheinfo))
2831 + nla_total_size(4); /* NDA_PROBES */
2832}
2833
2834static void __neigh_notify(struct neighbour *n, int type, int flags)
2835{
2836 struct net *net = dev_net(n->dev);
2837 struct sk_buff *skb;
2838 int err = -ENOBUFS;
2839
2840 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2841 if (skb == NULL)
2842 goto errout;
2843
2844 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2845 if (err < 0) {
2846 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2847 WARN_ON(err == -EMSGSIZE);
2848 kfree_skb(skb);
2849 goto errout;
2850 }
2851 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2852 return;
2853errout:
2854 if (err < 0)
2855 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2856}
2857
2858void neigh_app_ns(struct neighbour *n)
2859{
2860 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2861}
2862EXPORT_SYMBOL(neigh_app_ns);
2863
2864#ifdef CONFIG_SYSCTL
2865static int zero;
2866static int int_max = INT_MAX;
2867static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2868
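/* The legacy unres_qlen sysctl is expressed in packets while the kernel
 * accounts the unresolved queue in bytes.  Convert with the true size of
 * an Ethernet frame in both directions so the old knob stays coherent
 * with unres_qlen_bytes.
 */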
2869static int proc_unres_qlen(struct ctl_table *ctl, int write,
2870 void __user *buffer, size_t *lenp, loff_t *ppos)
2871{
2872 int size, ret;
2873 struct ctl_table tmp = *ctl;
2874
2875 tmp.extra1 = &zero;
2876 tmp.extra2 = &unres_qlen_max;
2877 tmp.data = &size;
2878
2879 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2880 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2881
2882 if (write && !ret)
2883 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2884 return ret;
2885}
2886
2887static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2888 int family)
2889{
2890 switch (family) {
2891 case AF_INET:
2892 return __in_dev_arp_parms_get_rcu(dev);
2893 case AF_INET6:
2894 return __in6_dev_nd_parms_get_rcu(dev);
2895 }
2896 return NULL;
2897}
2898
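/* Propagate a write to the default parms to every device in the
 * namespace that has not explicitly overridden that particular value
 * (tracked per field in data_state).
 */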
2899static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2900 int index)
2901{
2902 struct net_device *dev;
2903 int family = neigh_parms_family(p);
2904
2905 rcu_read_lock();
2906 for_each_netdev_rcu(net, dev) {
2907 struct neigh_parms *dst_p =
2908 neigh_get_dev_parms_rcu(dev, family);
2909
2910 if (dst_p && !test_bit(index, dst_p->data_state))
2911 dst_p->data[index] = p->data[index];
2912 }
2913 rcu_read_unlock();
2914}
2915
2916static void neigh_proc_update(struct ctl_table *ctl, int write)
2917{
2918 struct net_device *dev = ctl->extra1;
2919 struct neigh_parms *p = ctl->extra2;
2920 struct net *net = neigh_parms_net(p);
2921 int index = (int *) ctl->data - p->data;
2922
2923 if (!write)
2924 return;
2925
2926 set_bit(index, p->data_state);
2927 if (!dev) /* NULL dev means this is default value */
2928 neigh_copy_dflt_parms(net, p, index);
2929}
2930
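/* Each handler below wraps a standard proc_dointvec variant and then
 * calls neigh_proc_update(), so a write both marks the field as
 * explicitly set and, for the default parms, propagates the value to all
 * devices.
 */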
2931static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2932 void __user *buffer,
2933 size_t *lenp, loff_t *ppos)
2934{
2935 struct ctl_table tmp = *ctl;
2936 int ret;
2937
2938 tmp.extra1 = &zero;
2939 tmp.extra2 = &int_max;
2940
2941 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2942 neigh_proc_update(ctl, write);
2943 return ret;
2944}
2945
2946int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2947 void __user *buffer, size_t *lenp, loff_t *ppos)
2948{
2949 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2950
2951 neigh_proc_update(ctl, write);
2952 return ret;
2953}
2954EXPORT_SYMBOL(neigh_proc_dointvec);
2955
2956int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2957 void __user *buffer,
2958 size_t *lenp, loff_t *ppos)
2959{
2960 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2961
2962 neigh_proc_update(ctl, write);
2963 return ret;
2964}
2965EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2966
2967static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2968 void __user *buffer,
2969 size_t *lenp, loff_t *ppos)
2970{
2971 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2972
2973 neigh_proc_update(ctl, write);
2974 return ret;
2975}
2976
2977int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2978 void __user *buffer,
2979 size_t *lenp, loff_t *ppos)
2980{
2981 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2982
2983 neigh_proc_update(ctl, write);
2984 return ret;
2985}
2986EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2987
2988static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2989 void __user *buffer,
2990 size_t *lenp, loff_t *ppos)
2991{
2992 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2993
2994 neigh_proc_update(ctl, write);
2995 return ret;
2996}
2997
2998static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
2999 void __user *buffer,
3000 size_t *lenp, loff_t *ppos)
3001{
3002 struct neigh_parms *p = ctl->extra2;
3003 int ret;
3004
3005 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3006 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3007 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3008 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3009 else
3010 ret = -1;
3011
3012 if (write && ret == 0) {
3013		/* Update reachable_time as well; otherwise the change only
3014		 * takes effect the next time neigh_periodic_work decides to
3015		 * recompute it.
3016		 */
3017 p->reachable_time =
3018 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3019 }
3020 return ret;
3021}
3022
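/* The template below stores per-field offsets into struct neigh_parms as
 * its .data pointers; neigh_sysctl_register() rebases them onto a real
 * parms instance by adding (long)p.
 */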
3023#define NEIGH_PARMS_DATA_OFFSET(index) \
3024 (&((struct neigh_parms *) 0)->data[index])
3025
3026#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3027 [NEIGH_VAR_ ## attr] = { \
3028 .procname = name, \
3029 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3030 .maxlen = sizeof(int), \
3031 .mode = mval, \
3032 .proc_handler = proc, \
3033 }
3034
3035#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3036 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3037
3038#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3039 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3040
3041#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3042 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3043
3044#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3045 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3046
3047#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3048 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3049
3050#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3051 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3052
3053static struct neigh_sysctl_table {
3054 struct ctl_table_header *sysctl_header;
3055 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3056} neigh_sysctl_template __read_mostly = {
3057 .neigh_vars = {
3058 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3059 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3060 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3061 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3062 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3063 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3064 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3065 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3066 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3067 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3068 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3069 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3070 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3071 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3072 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3073 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3074 [NEIGH_VAR_GC_INTERVAL] = {
3075 .procname = "gc_interval",
3076 .maxlen = sizeof(int),
3077 .mode = 0644,
3078 .proc_handler = proc_dointvec_jiffies,
3079 },
3080 [NEIGH_VAR_GC_THRESH1] = {
3081 .procname = "gc_thresh1",
3082 .maxlen = sizeof(int),
3083 .mode = 0644,
3084 .extra1 = &zero,
3085 .extra2 = &int_max,
3086 .proc_handler = proc_dointvec_minmax,
3087 },
3088 [NEIGH_VAR_GC_THRESH2] = {
3089 .procname = "gc_thresh2",
3090 .maxlen = sizeof(int),
3091 .mode = 0644,
3092 .extra1 = &zero,
3093 .extra2 = &int_max,
3094 .proc_handler = proc_dointvec_minmax,
3095 },
3096 [NEIGH_VAR_GC_THRESH3] = {
3097 .procname = "gc_thresh3",
3098 .maxlen = sizeof(int),
3099 .mode = 0644,
3100 .extra1 = &zero,
3101 .extra2 = &int_max,
3102 .proc_handler = proc_dointvec_minmax,
3103 },
3104 {},
3105 },
3106};
3107
3108int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3109 proc_handler *handler)
3110{
3111 int i;
3112 struct neigh_sysctl_table *t;
3113 const char *dev_name_source;
3114	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3115 char *p_name;
3116
3117 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3118 if (!t)
3119 goto err;
3120
3121 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3122 t->neigh_vars[i].data += (long) p;
3123 t->neigh_vars[i].extra1 = dev;
3124 t->neigh_vars[i].extra2 = p;
3125 }
3126
3127 if (dev) {
3128 dev_name_source = dev->name;
3129 /* Terminate the table early */
3130 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3131 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3132 } else {
3133 struct neigh_table *tbl = p->tbl;
3134 dev_name_source = "default";
3135 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3136 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3137 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3138 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3139 }
3140
3141 if (handler) {
3142 /* RetransTime */
3143 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3144 /* ReachableTime */
3145 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3146 /* RetransTime (in milliseconds)*/
3147 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3148 /* ReachableTime (in milliseconds) */
3149 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3150 } else {
3151		/* These handlers update p->reachable_time after
3152		 * base_reachable_time(_ms) is set, so that the new timer takes
3153		 * effect at the next neighbour update instead of waiting for
3154		 * neigh_periodic_work to recompute it (which can be multiple
3155		 * minutes away).  Any handler that replaces them should do the same.
3156		 */
3157 /* ReachableTime */
3158 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3159 neigh_proc_base_reachable_time;
3160 /* ReachableTime (in milliseconds) */
3161 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3162 neigh_proc_base_reachable_time;
3163 }
3164
3165 /* Don't export sysctls to unprivileged users */
3166 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3167 t->neigh_vars[0].procname = NULL;
3168
3169 switch (neigh_parms_family(p)) {
3170 case AF_INET:
3171 p_name = "ipv4";
3172 break;
3173 case AF_INET6:
3174 p_name = "ipv6";
3175 break;
3176 default:
3177 BUG();
3178 }
3179
3180 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3181 p_name, dev_name_source);
3182 t->sysctl_header =
3183 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3184 if (!t->sysctl_header)
3185 goto free;
3186
3187 p->sysctl_table = t;
3188 return 0;
3189
3190free:
3191 kfree(t);
3192err:
3193 return -ENOBUFS;
3194}
3195EXPORT_SYMBOL(neigh_sysctl_register);
3196
3197void neigh_sysctl_unregister(struct neigh_parms *p)
3198{
3199 if (p->sysctl_table) {
3200 struct neigh_sysctl_table *t = p->sysctl_table;
3201 p->sysctl_table = NULL;
3202 unregister_net_sysctl_table(t->sysctl_header);
3203 kfree(t);
3204 }
3205}
3206EXPORT_SYMBOL(neigh_sysctl_unregister);
3207
3208#endif /* CONFIG_SYSCTL */
3209
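/* Hook the neighbour message handlers into rtnetlink.  This runs as a
 * subsys initcall so they are registered before the per-family tables
 * (ARP, NDISC) come up later in boot.
 */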
3210static int __init neigh_init(void)
3211{
3212 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3213 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3214 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3215
3216 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3217 NULL);
3218 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3219
3220 return 0;
3221}
3222
3223subsys_initcall(neigh_init);
3224
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/slab.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/socket.h>
25#include <linux/netdevice.h>
26#include <linux/proc_fs.h>
27#ifdef CONFIG_SYSCTL
28#include <linux/sysctl.h>
29#endif
30#include <linux/times.h>
31#include <net/net_namespace.h>
32#include <net/neighbour.h>
33#include <net/dst.h>
34#include <net/sock.h>
35#include <net/netevent.h>
36#include <net/netlink.h>
37#include <linux/rtnetlink.h>
38#include <linux/random.h>
39#include <linux/string.h>
40#include <linux/log2.h>
41
42#define NEIGH_DEBUG 1
43
44#define NEIGH_PRINTK(x...) printk(x)
45#define NEIGH_NOPRINTK(x...) do { ; } while(0)
46#define NEIGH_PRINTK1 NEIGH_NOPRINTK
47#define NEIGH_PRINTK2 NEIGH_NOPRINTK
48
49#if NEIGH_DEBUG >= 1
50#undef NEIGH_PRINTK1
51#define NEIGH_PRINTK1 NEIGH_PRINTK
52#endif
53#if NEIGH_DEBUG >= 2
54#undef NEIGH_PRINTK2
55#define NEIGH_PRINTK2 NEIGH_PRINTK
56#endif
57
58#define PNEIGH_HASHMASK 0xF
59
60static void neigh_timer_handler(unsigned long arg);
61static void __neigh_notify(struct neighbour *n, int type, int flags);
62static void neigh_update_notify(struct neighbour *neigh);
63static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
64
65static struct neigh_table *neigh_tables;
66#ifdef CONFIG_PROC_FS
67static const struct file_operations neigh_stat_seq_fops;
68#endif
69
70/*
71 Neighbour hash table buckets are protected with rwlock tbl->lock.
72
73 - All the scans/updates to hash buckets MUST be made under this lock.
74 - NOTHING clever should be made under this lock: no callbacks
75 to protocol backends, no attempts to send something to network.
76 It will result in deadlocks, if backend/driver wants to use neighbour
77 cache.
78 - If the entry requires some non-trivial actions, increase
79 its reference count and release table lock.
80
81 Neighbour entries are protected:
82 - with reference count.
83 - with rwlock neigh->lock
84
85 Reference count prevents destruction.
86
87 neigh->lock mainly serializes ll address data and its validity state.
88 However, the same lock is used to protect another entry fields:
89 - timer
90 - resolution queue
91
92 Again, nothing clever shall be made under neigh->lock,
93 the most complicated procedure, which we allow is dev->hard_header.
94 It is supposed, that dev->hard_header is simplistic and does
95 not make callbacks to neighbour tables.
96
97 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
98 list of neighbour tables. This list is used only in process context,
99 */
100
101static DEFINE_RWLOCK(neigh_tbl_lock);
102
103static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
104{
105 kfree_skb(skb);
106 return -ENETDOWN;
107}
108
109static void neigh_cleanup_and_release(struct neighbour *neigh)
110{
111 if (neigh->parms->neigh_cleanup)
112 neigh->parms->neigh_cleanup(neigh);
113
114 __neigh_notify(neigh, RTM_DELNEIGH, 0);
115 neigh_release(neigh);
116}
117
118/*
119 * It is random distribution in the interval (1/2)*base...(3/2)*base.
120 * It corresponds to default IPv6 settings and is not overridable,
121 * because it is really reasonable choice.
122 */
123
124unsigned long neigh_rand_reach_time(unsigned long base)
125{
126 return base ? (net_random() % base) + (base >> 1) : 0;
127}
128EXPORT_SYMBOL(neigh_rand_reach_time);
129
130
131static int neigh_forced_gc(struct neigh_table *tbl)
132{
133 int shrunk = 0;
134 int i;
135 struct neigh_hash_table *nht;
136
137 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
138
139 write_lock_bh(&tbl->lock);
140 nht = rcu_dereference_protected(tbl->nht,
141 lockdep_is_held(&tbl->lock));
142 for (i = 0; i < (1 << nht->hash_shift); i++) {
143 struct neighbour *n;
144 struct neighbour __rcu **np;
145
146 np = &nht->hash_buckets[i];
147 while ((n = rcu_dereference_protected(*np,
148 lockdep_is_held(&tbl->lock))) != NULL) {
149 /* Neighbour record may be discarded if:
150 * - nobody refers to it.
151 * - it is not permanent
152 */
153 write_lock(&n->lock);
154 if (atomic_read(&n->refcnt) == 1 &&
155 !(n->nud_state & NUD_PERMANENT)) {
156 rcu_assign_pointer(*np,
157 rcu_dereference_protected(n->next,
158 lockdep_is_held(&tbl->lock)));
159 n->dead = 1;
160 shrunk = 1;
161 write_unlock(&n->lock);
162 neigh_cleanup_and_release(n);
163 continue;
164 }
165 write_unlock(&n->lock);
166 np = &n->next;
167 }
168 }
169
170 tbl->last_flush = jiffies;
171
172 write_unlock_bh(&tbl->lock);
173
174 return shrunk;
175}
176
177static void neigh_add_timer(struct neighbour *n, unsigned long when)
178{
179 neigh_hold(n);
180 if (unlikely(mod_timer(&n->timer, when))) {
181 printk("NEIGH: BUG, double timer add, state is %x\n",
182 n->nud_state);
183 dump_stack();
184 }
185}
186
187static int neigh_del_timer(struct neighbour *n)
188{
189 if ((n->nud_state & NUD_IN_TIMER) &&
190 del_timer(&n->timer)) {
191 neigh_release(n);
192 return 1;
193 }
194 return 0;
195}
196
197static void pneigh_queue_purge(struct sk_buff_head *list)
198{
199 struct sk_buff *skb;
200
201 while ((skb = skb_dequeue(list)) != NULL) {
202 dev_put(skb->dev);
203 kfree_skb(skb);
204 }
205}
206
207static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
208{
209 int i;
210 struct neigh_hash_table *nht;
211
212 nht = rcu_dereference_protected(tbl->nht,
213 lockdep_is_held(&tbl->lock));
214
215 for (i = 0; i < (1 << nht->hash_shift); i++) {
216 struct neighbour *n;
217 struct neighbour __rcu **np = &nht->hash_buckets[i];
218
219 while ((n = rcu_dereference_protected(*np,
220 lockdep_is_held(&tbl->lock))) != NULL) {
221 if (dev && n->dev != dev) {
222 np = &n->next;
223 continue;
224 }
225 rcu_assign_pointer(*np,
226 rcu_dereference_protected(n->next,
227 lockdep_is_held(&tbl->lock)));
228 write_lock(&n->lock);
229 neigh_del_timer(n);
230 n->dead = 1;
231
232 if (atomic_read(&n->refcnt) != 1) {
233 /* The most unpleasant situation.
234 We must destroy neighbour entry,
235 but someone still uses it.
236
237 The destroy will be delayed until
238 the last user releases us, but
239 we must kill timers etc. and move
240 it to safe state.
241 */
242 skb_queue_purge(&n->arp_queue);
243 n->arp_queue_len_bytes = 0;
244 n->output = neigh_blackhole;
245 if (n->nud_state & NUD_VALID)
246 n->nud_state = NUD_NOARP;
247 else
248 n->nud_state = NUD_NONE;
249 NEIGH_PRINTK2("neigh %p is stray.\n", n);
250 }
251 write_unlock(&n->lock);
252 neigh_cleanup_and_release(n);
253 }
254 }
255}
256
257void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
258{
259 write_lock_bh(&tbl->lock);
260 neigh_flush_dev(tbl, dev);
261 write_unlock_bh(&tbl->lock);
262}
263EXPORT_SYMBOL(neigh_changeaddr);
264
265int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
266{
267 write_lock_bh(&tbl->lock);
268 neigh_flush_dev(tbl, dev);
269 pneigh_ifdown(tbl, dev);
270 write_unlock_bh(&tbl->lock);
271
272 del_timer_sync(&tbl->proxy_timer);
273 pneigh_queue_purge(&tbl->proxy_queue);
274 return 0;
275}
276EXPORT_SYMBOL(neigh_ifdown);
277
278static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
279{
280 struct neighbour *n = NULL;
281 unsigned long now = jiffies;
282 int entries;
283
284 entries = atomic_inc_return(&tbl->entries) - 1;
285 if (entries >= tbl->gc_thresh3 ||
286 (entries >= tbl->gc_thresh2 &&
287 time_after(now, tbl->last_flush + 5 * HZ))) {
288 if (!neigh_forced_gc(tbl) &&
289 entries >= tbl->gc_thresh3)
290 goto out_entries;
291 }
292
293 if (tbl->entry_size)
294 n = kzalloc(tbl->entry_size, GFP_ATOMIC);
295 else {
296 int sz = sizeof(*n) + tbl->key_len;
297
298 sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
299 sz += dev->neigh_priv_len;
300 n = kzalloc(sz, GFP_ATOMIC);
301 }
302 if (!n)
303 goto out_entries;
304
305 skb_queue_head_init(&n->arp_queue);
306 rwlock_init(&n->lock);
307 seqlock_init(&n->ha_lock);
308 n->updated = n->used = now;
309 n->nud_state = NUD_NONE;
310 n->output = neigh_blackhole;
311 seqlock_init(&n->hh.hh_lock);
312 n->parms = neigh_parms_clone(&tbl->parms);
313 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
314
315 NEIGH_CACHE_STAT_INC(tbl, allocs);
316 n->tbl = tbl;
317 atomic_set(&n->refcnt, 1);
318 n->dead = 1;
319out:
320 return n;
321
322out_entries:
323 atomic_dec(&tbl->entries);
324 goto out;
325}
326
327static void neigh_get_hash_rnd(u32 *x)
328{
329 get_random_bytes(x, sizeof(*x));
330 *x |= 1;
331}
332
333static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
334{
335 size_t size = (1 << shift) * sizeof(struct neighbour *);
336 struct neigh_hash_table *ret;
337 struct neighbour __rcu **buckets;
338 int i;
339
340 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
341 if (!ret)
342 return NULL;
343 if (size <= PAGE_SIZE)
344 buckets = kzalloc(size, GFP_ATOMIC);
345 else
346 buckets = (struct neighbour __rcu **)
347 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
348 get_order(size));
349 if (!buckets) {
350 kfree(ret);
351 return NULL;
352 }
353 ret->hash_buckets = buckets;
354 ret->hash_shift = shift;
355 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
356 neigh_get_hash_rnd(&ret->hash_rnd[i]);
357 return ret;
358}
359
360static void neigh_hash_free_rcu(struct rcu_head *head)
361{
362 struct neigh_hash_table *nht = container_of(head,
363 struct neigh_hash_table,
364 rcu);
365 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
366 struct neighbour __rcu **buckets = nht->hash_buckets;
367
368 if (size <= PAGE_SIZE)
369 kfree(buckets);
370 else
371 free_pages((unsigned long)buckets, get_order(size));
372 kfree(nht);
373}
374
375static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
376 unsigned long new_shift)
377{
378 unsigned int i, hash;
379 struct neigh_hash_table *new_nht, *old_nht;
380
381 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
382
383 old_nht = rcu_dereference_protected(tbl->nht,
384 lockdep_is_held(&tbl->lock));
385 new_nht = neigh_hash_alloc(new_shift);
386 if (!new_nht)
387 return old_nht;
388
389 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
390 struct neighbour *n, *next;
391
392 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
393 lockdep_is_held(&tbl->lock));
394 n != NULL;
395 n = next) {
396 hash = tbl->hash(n->primary_key, n->dev,
397 new_nht->hash_rnd);
398
399 hash >>= (32 - new_nht->hash_shift);
400 next = rcu_dereference_protected(n->next,
401 lockdep_is_held(&tbl->lock));
402
403 rcu_assign_pointer(n->next,
404 rcu_dereference_protected(
405 new_nht->hash_buckets[hash],
406 lockdep_is_held(&tbl->lock)));
407 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
408 }
409 }
410
411 rcu_assign_pointer(tbl->nht, new_nht);
412 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
413 return new_nht;
414}
415
416struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
417 struct net_device *dev)
418{
419 struct neighbour *n;
420 int key_len = tbl->key_len;
421 u32 hash_val;
422 struct neigh_hash_table *nht;
423
424 NEIGH_CACHE_STAT_INC(tbl, lookups);
425
426 rcu_read_lock_bh();
427 nht = rcu_dereference_bh(tbl->nht);
428 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
429
430 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
431 n != NULL;
432 n = rcu_dereference_bh(n->next)) {
433 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
434 if (!atomic_inc_not_zero(&n->refcnt))
435 n = NULL;
436 NEIGH_CACHE_STAT_INC(tbl, hits);
437 break;
438 }
439 }
440
441 rcu_read_unlock_bh();
442 return n;
443}
444EXPORT_SYMBOL(neigh_lookup);
445
446struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
447 const void *pkey)
448{
449 struct neighbour *n;
450 int key_len = tbl->key_len;
451 u32 hash_val;
452 struct neigh_hash_table *nht;
453
454 NEIGH_CACHE_STAT_INC(tbl, lookups);
455
456 rcu_read_lock_bh();
457 nht = rcu_dereference_bh(tbl->nht);
458 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
459
460 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
461 n != NULL;
462 n = rcu_dereference_bh(n->next)) {
463 if (!memcmp(n->primary_key, pkey, key_len) &&
464 net_eq(dev_net(n->dev), net)) {
465 if (!atomic_inc_not_zero(&n->refcnt))
466 n = NULL;
467 NEIGH_CACHE_STAT_INC(tbl, hits);
468 break;
469 }
470 }
471
472 rcu_read_unlock_bh();
473 return n;
474}
475EXPORT_SYMBOL(neigh_lookup_nodev);
476
477struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
478 struct net_device *dev)
479{
480 u32 hash_val;
481 int key_len = tbl->key_len;
482 int error;
483 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
484 struct neigh_hash_table *nht;
485
486 if (!n) {
487 rc = ERR_PTR(-ENOBUFS);
488 goto out;
489 }
490
491 memcpy(n->primary_key, pkey, key_len);
492 n->dev = dev;
493 dev_hold(dev);
494
495 /* Protocol specific setup. */
496 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
497 rc = ERR_PTR(error);
498 goto out_neigh_release;
499 }
500
501 if (dev->netdev_ops->ndo_neigh_construct) {
502 error = dev->netdev_ops->ndo_neigh_construct(n);
503 if (error < 0) {
504 rc = ERR_PTR(error);
505 goto out_neigh_release;
506 }
507 }
508
509 /* Device specific setup. */
510 if (n->parms->neigh_setup &&
511 (error = n->parms->neigh_setup(n)) < 0) {
512 rc = ERR_PTR(error);
513 goto out_neigh_release;
514 }
515
516 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
517
518 write_lock_bh(&tbl->lock);
519 nht = rcu_dereference_protected(tbl->nht,
520 lockdep_is_held(&tbl->lock));
521
522 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
523 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
524
525 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
526
527 if (n->parms->dead) {
528 rc = ERR_PTR(-EINVAL);
529 goto out_tbl_unlock;
530 }
531
532 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
533 lockdep_is_held(&tbl->lock));
534 n1 != NULL;
535 n1 = rcu_dereference_protected(n1->next,
536 lockdep_is_held(&tbl->lock))) {
537 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
538 neigh_hold(n1);
539 rc = n1;
540 goto out_tbl_unlock;
541 }
542 }
543
544 n->dead = 0;
545 neigh_hold(n);
546 rcu_assign_pointer(n->next,
547 rcu_dereference_protected(nht->hash_buckets[hash_val],
548 lockdep_is_held(&tbl->lock)));
549 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
550 write_unlock_bh(&tbl->lock);
551 NEIGH_PRINTK2("neigh %p is created.\n", n);
552 rc = n;
553out:
554 return rc;
555out_tbl_unlock:
556 write_unlock_bh(&tbl->lock);
557out_neigh_release:
558 neigh_release(n);
559 goto out;
560}
561EXPORT_SYMBOL(neigh_create);
562
563static u32 pneigh_hash(const void *pkey, int key_len)
564{
565 u32 hash_val = *(u32 *)(pkey + key_len - 4);
566 hash_val ^= (hash_val >> 16);
567 hash_val ^= hash_val >> 8;
568 hash_val ^= hash_val >> 4;
569 hash_val &= PNEIGH_HASHMASK;
570 return hash_val;
571}
572
573static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
574 struct net *net,
575 const void *pkey,
576 int key_len,
577 struct net_device *dev)
578{
579 while (n) {
580 if (!memcmp(n->key, pkey, key_len) &&
581 net_eq(pneigh_net(n), net) &&
582 (n->dev == dev || !n->dev))
583 return n;
584 n = n->next;
585 }
586 return NULL;
587}
588
589struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
590 struct net *net, const void *pkey, struct net_device *dev)
591{
592 int key_len = tbl->key_len;
593 u32 hash_val = pneigh_hash(pkey, key_len);
594
595 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
596 net, pkey, key_len, dev);
597}
598EXPORT_SYMBOL_GPL(__pneigh_lookup);
599
600struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
601 struct net *net, const void *pkey,
602 struct net_device *dev, int creat)
603{
604 struct pneigh_entry *n;
605 int key_len = tbl->key_len;
606 u32 hash_val = pneigh_hash(pkey, key_len);
607
608 read_lock_bh(&tbl->lock);
609 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
610 net, pkey, key_len, dev);
611 read_unlock_bh(&tbl->lock);
612
613 if (n || !creat)
614 goto out;
615
616 ASSERT_RTNL();
617
618 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
619 if (!n)
620 goto out;
621
622 write_pnet(&n->net, hold_net(net));
623 memcpy(n->key, pkey, key_len);
624 n->dev = dev;
625 if (dev)
626 dev_hold(dev);
627
628 if (tbl->pconstructor && tbl->pconstructor(n)) {
629 if (dev)
630 dev_put(dev);
631 release_net(net);
632 kfree(n);
633 n = NULL;
634 goto out;
635 }
636
637 write_lock_bh(&tbl->lock);
638 n->next = tbl->phash_buckets[hash_val];
639 tbl->phash_buckets[hash_val] = n;
640 write_unlock_bh(&tbl->lock);
641out:
642 return n;
643}
644EXPORT_SYMBOL(pneigh_lookup);
645
646
647int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
648 struct net_device *dev)
649{
650 struct pneigh_entry *n, **np;
651 int key_len = tbl->key_len;
652 u32 hash_val = pneigh_hash(pkey, key_len);
653
654 write_lock_bh(&tbl->lock);
655 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
656 np = &n->next) {
657 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
658 net_eq(pneigh_net(n), net)) {
659 *np = n->next;
660 write_unlock_bh(&tbl->lock);
661 if (tbl->pdestructor)
662 tbl->pdestructor(n);
663 if (n->dev)
664 dev_put(n->dev);
665 release_net(pneigh_net(n));
666 kfree(n);
667 return 0;
668 }
669 }
670 write_unlock_bh(&tbl->lock);
671 return -ENOENT;
672}
673
674static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
675{
676 struct pneigh_entry *n, **np;
677 u32 h;
678
679 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
680 np = &tbl->phash_buckets[h];
681 while ((n = *np) != NULL) {
682 if (!dev || n->dev == dev) {
683 *np = n->next;
684 if (tbl->pdestructor)
685 tbl->pdestructor(n);
686 if (n->dev)
687 dev_put(n->dev);
688 release_net(pneigh_net(n));
689 kfree(n);
690 continue;
691 }
692 np = &n->next;
693 }
694 }
695 return -ENOENT;
696}
697
698static void neigh_parms_destroy(struct neigh_parms *parms);
699
700static inline void neigh_parms_put(struct neigh_parms *parms)
701{
702 if (atomic_dec_and_test(&parms->refcnt))
703 neigh_parms_destroy(parms);
704}
705
706/*
707 * neighbour must already be out of the table;
708 *
709 */
710void neigh_destroy(struct neighbour *neigh)
711{
712 struct net_device *dev = neigh->dev;
713
714 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
715
716 if (!neigh->dead) {
717 pr_warn("Destroying alive neighbour %p\n", neigh);
718 dump_stack();
719 return;
720 }
721
722 if (neigh_del_timer(neigh))
723 pr_warn("Impossible event\n");
724
725 skb_queue_purge(&neigh->arp_queue);
726 neigh->arp_queue_len_bytes = 0;
727
728 if (dev->netdev_ops->ndo_neigh_destroy)
729 dev->netdev_ops->ndo_neigh_destroy(neigh);
730
731 dev_put(dev);
732 neigh_parms_put(neigh->parms);
733
734 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
735
736 atomic_dec(&neigh->tbl->entries);
737 kfree_rcu(neigh, rcu);
738}
739EXPORT_SYMBOL(neigh_destroy);
740
741/* Neighbour state is suspicious;
742 disable fast path.
743
744 Called with write_locked neigh.
745 */
746static void neigh_suspect(struct neighbour *neigh)
747{
748 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
749
750 neigh->output = neigh->ops->output;
751}
752
753/* Neighbour state is OK;
754 enable fast path.
755
756 Called with write_locked neigh.
757 */
758static void neigh_connect(struct neighbour *neigh)
759{
760 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
761
762 neigh->output = neigh->ops->connected_output;
763}
764
765static void neigh_periodic_work(struct work_struct *work)
766{
767 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
768 struct neighbour *n;
769 struct neighbour __rcu **np;
770 unsigned int i;
771 struct neigh_hash_table *nht;
772
773 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
774
775 write_lock_bh(&tbl->lock);
776 nht = rcu_dereference_protected(tbl->nht,
777 lockdep_is_held(&tbl->lock));
778
779 /*
780 * periodically recompute ReachableTime from random function
781 */
782
783 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
784 struct neigh_parms *p;
785 tbl->last_rand = jiffies;
786 for (p = &tbl->parms; p; p = p->next)
787 p->reachable_time =
788 neigh_rand_reach_time(p->base_reachable_time);
789 }
790
791 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
792 np = &nht->hash_buckets[i];
793
794 while ((n = rcu_dereference_protected(*np,
795 lockdep_is_held(&tbl->lock))) != NULL) {
796 unsigned int state;
797
798 write_lock(&n->lock);
799
800 state = n->nud_state;
801 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
802 write_unlock(&n->lock);
803 goto next_elt;
804 }
805
806 if (time_before(n->used, n->confirmed))
807 n->used = n->confirmed;
808
809 if (atomic_read(&n->refcnt) == 1 &&
810 (state == NUD_FAILED ||
811 time_after(jiffies, n->used + n->parms->gc_staletime))) {
812 *np = n->next;
813 n->dead = 1;
814 write_unlock(&n->lock);
815 neigh_cleanup_and_release(n);
816 continue;
817 }
818 write_unlock(&n->lock);
819
820next_elt:
821 np = &n->next;
822 }
823 /*
824 * It's fine to release lock here, even if hash table
825 * grows while we are preempted.
826 */
827 write_unlock_bh(&tbl->lock);
828 cond_resched();
829 write_lock_bh(&tbl->lock);
830 nht = rcu_dereference_protected(tbl->nht,
831 lockdep_is_held(&tbl->lock));
832 }
833 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
834 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
835 * base_reachable_time.
836 */
837 schedule_delayed_work(&tbl->gc_work,
838 tbl->parms.base_reachable_time >> 1);
839 write_unlock_bh(&tbl->lock);
840}
841
842static __inline__ int neigh_max_probes(struct neighbour *n)
843{
844 struct neigh_parms *p = n->parms;
845 return (n->nud_state & NUD_PROBE) ?
846 p->ucast_probes :
847 p->ucast_probes + p->app_probes + p->mcast_probes;
848}
849
850static void neigh_invalidate(struct neighbour *neigh)
851 __releases(neigh->lock)
852 __acquires(neigh->lock)
853{
854 struct sk_buff *skb;
855
856 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
857 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
858 neigh->updated = jiffies;
859
860 /* It is very thin place. report_unreachable is very complicated
861 routine. Particularly, it can hit the same neighbour entry!
862
863 So that, we try to be accurate and avoid dead loop. --ANK
864 */
865 while (neigh->nud_state == NUD_FAILED &&
866 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
867 write_unlock(&neigh->lock);
868 neigh->ops->error_report(neigh, skb);
869 write_lock(&neigh->lock);
870 }
871 skb_queue_purge(&neigh->arp_queue);
872 neigh->arp_queue_len_bytes = 0;
873}
874
875static void neigh_probe(struct neighbour *neigh)
876 __releases(neigh->lock)
877{
878 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
879 /* keep skb alive even if arp_queue overflows */
880 if (skb)
881 skb = skb_copy(skb, GFP_ATOMIC);
882 write_unlock(&neigh->lock);
883 neigh->ops->solicit(neigh, skb);
884 atomic_inc(&neigh->probes);
885 kfree_skb(skb);
886}
887
888/* Called when a timer expires for a neighbour entry. */
889
890static void neigh_timer_handler(unsigned long arg)
891{
892 unsigned long now, next;
893 struct neighbour *neigh = (struct neighbour *)arg;
894 unsigned int state;
895 int notify = 0;
896
897 write_lock(&neigh->lock);
898
899 state = neigh->nud_state;
900 now = jiffies;
901 next = now + HZ;
902
903 if (!(state & NUD_IN_TIMER))
904 goto out;
905
906 if (state & NUD_REACHABLE) {
907 if (time_before_eq(now,
908 neigh->confirmed + neigh->parms->reachable_time)) {
909 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
910 next = neigh->confirmed + neigh->parms->reachable_time;
911 } else if (time_before_eq(now,
912 neigh->used + neigh->parms->delay_probe_time)) {
913 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
914 neigh->nud_state = NUD_DELAY;
915 neigh->updated = jiffies;
916 neigh_suspect(neigh);
917 next = now + neigh->parms->delay_probe_time;
918 } else {
919 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
920 neigh->nud_state = NUD_STALE;
921 neigh->updated = jiffies;
922 neigh_suspect(neigh);
923 notify = 1;
924 }
925 } else if (state & NUD_DELAY) {
926 if (time_before_eq(now,
927 neigh->confirmed + neigh->parms->delay_probe_time)) {
928 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
929 neigh->nud_state = NUD_REACHABLE;
930 neigh->updated = jiffies;
931 neigh_connect(neigh);
932 notify = 1;
933 next = neigh->confirmed + neigh->parms->reachable_time;
934 } else {
935 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
936 neigh->nud_state = NUD_PROBE;
937 neigh->updated = jiffies;
938 atomic_set(&neigh->probes, 0);
939 next = now + neigh->parms->retrans_time;
940 }
941 } else {
942 /* NUD_PROBE|NUD_INCOMPLETE */
943 next = now + neigh->parms->retrans_time;
944 }
945
946 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
947 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
948 neigh->nud_state = NUD_FAILED;
949 notify = 1;
950 neigh_invalidate(neigh);
951 }
952
953 if (neigh->nud_state & NUD_IN_TIMER) {
954 if (time_before(next, jiffies + HZ/2))
955 next = jiffies + HZ/2;
956 if (!mod_timer(&neigh->timer, next))
957 neigh_hold(neigh);
958 }
959 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
960 neigh_probe(neigh);
961 } else {
962out:
963 write_unlock(&neigh->lock);
964 }
965
966 if (notify)
967 neigh_update_notify(neigh);
968
969 neigh_release(neigh);
970}
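
/* A note on the mod_timer() dance above: mod_timer() returns 0 when
 * the timer was not pending (it has just expired, since we are in the
 * handler), so re-arming it requires taking a fresh reference; the
 * neigh_release() at the end then drops only the reference that this
 * expiry consumed.
 */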
971
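/* Start resolution for an entry that is not in a usable state. A short
 * summary of the contract, as implemented below: returns 0 when the
 * caller may transmit the skb right away (the entry was usable, or
 * merely STALE and has been moved to DELAY), and 1 when the skb was
 * queued on the arp_queue pending resolution, or dropped because
 * resolution already failed. Takes and releases neigh->lock itself;
 * the caller must hold a reference on the entry.
 */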
972int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
973{
974 int rc;
975 bool immediate_probe = false;
976
977 write_lock_bh(&neigh->lock);
978
979 rc = 0;
980 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
981 goto out_unlock_bh;
982
983 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
984 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
985 unsigned long next, now = jiffies;
986
987 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
988 neigh->nud_state = NUD_INCOMPLETE;
989 neigh->updated = now;
990 next = now + max(neigh->parms->retrans_time, HZ/2);
991 neigh_add_timer(neigh, next);
992 immediate_probe = true;
993 } else {
994 neigh->nud_state = NUD_FAILED;
995 neigh->updated = jiffies;
996 write_unlock_bh(&neigh->lock);
997
998 kfree_skb(skb);
999 return 1;
1000 }
1001 } else if (neigh->nud_state & NUD_STALE) {
1002		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1003 neigh->nud_state = NUD_DELAY;
1004 neigh->updated = jiffies;
1005 neigh_add_timer(neigh,
1006 jiffies + neigh->parms->delay_probe_time);
1007 }
1008
1009 if (neigh->nud_state == NUD_INCOMPLETE) {
1010 if (skb) {
1011 while (neigh->arp_queue_len_bytes + skb->truesize >
1012 neigh->parms->queue_len_bytes) {
1013 struct sk_buff *buff;
1014
1015 buff = __skb_dequeue(&neigh->arp_queue);
1016 if (!buff)
1017 break;
1018 neigh->arp_queue_len_bytes -= buff->truesize;
1019 kfree_skb(buff);
1020 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1021 }
1022 skb_dst_force(skb);
1023 __skb_queue_tail(&neigh->arp_queue, skb);
1024 neigh->arp_queue_len_bytes += skb->truesize;
1025 }
1026 rc = 1;
1027 }
1028out_unlock_bh:
1029 if (immediate_probe)
1030 neigh_probe(neigh);
1031 else
1032 write_unlock(&neigh->lock);
1033 local_bh_enable();
1034 return rc;
1035}
1036EXPORT_SYMBOL(__neigh_event_send);
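
/* Most callers do not invoke this directly but go through the
 * neigh_event_send() wrapper in <net/neighbour.h>, which refreshes
 * neigh->used and only falls through to this slow path when the state
 * is not NUD_CONNECTED, NUD_DELAY or NUD_PROBE.
 */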
1037
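/* Propagate a new link-layer address into any cached hardware header.
 * The hh_lock seqlock lets fast-path readers copy the cached header
 * without taking neigh->lock: the writer below bumps the sequence
 * around the update and readers retry their copy if it changed
 * mid-read (see neigh_hh_output() in <net/neighbour.h>).
 */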
1038static void neigh_update_hhs(struct neighbour *neigh)
1039{
1040 struct hh_cache *hh;
1041 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1042 = NULL;
1043
1044 if (neigh->dev->header_ops)
1045 update = neigh->dev->header_ops->cache_update;
1046
1047 if (update) {
1048 hh = &neigh->hh;
1049 if (hh->hh_len) {
1050 write_seqlock_bh(&hh->hh_lock);
1051 update(hh, neigh->dev, neigh->ha);
1052 write_sequnlock_bh(&hh->hh_lock);
1053 }
1054 }
1055}
1056
1059/* Generic update routine.
1060   -- lladdr is the new lladdr, or NULL if it is not supplied.
1061   -- new is the new state.
1062   -- flags
1063	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1064				if it is different.
1065	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1066				lladdr instead of overriding it
1067				if it is different.
1068				It also allows retaining the current state
1069				if the lladdr is unchanged.
1070	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1071
1072	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1073				NTF_ROUTER flag.
1074	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
1075				to be a router.
1076
1077   Caller MUST hold a reference count on the entry.
1078 */
1079
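/* Illustrative calls, for orientation: the ARP receive path typically
 * confirms an entry with neigh_update(n, sha, NUD_REACHABLE,
 * NEIGH_UPDATE_F_OVERRIDE), while neigh_delete() below tears one down
 * with neigh_update(n, NULL, NUD_FAILED,
 * NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN).
 */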
1080int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1081 u32 flags)
1082{
1083 u8 old;
1084 int err;
1085 int notify = 0;
1086 struct net_device *dev;
1087 int update_isrouter = 0;
1088
1089 write_lock_bh(&neigh->lock);
1090
1091 dev = neigh->dev;
1092 old = neigh->nud_state;
1093 err = -EPERM;
1094
1095 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1096 (old & (NUD_NOARP | NUD_PERMANENT)))
1097 goto out;
1098
1099 if (!(new & NUD_VALID)) {
1100 neigh_del_timer(neigh);
1101 if (old & NUD_CONNECTED)
1102 neigh_suspect(neigh);
1103 neigh->nud_state = new;
1104 err = 0;
1105 notify = old & NUD_VALID;
1106 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1107 (new & NUD_FAILED)) {
1108 neigh_invalidate(neigh);
1109 notify = 1;
1110 }
1111 goto out;
1112 }
1113
1114 /* Compare new lladdr with cached one */
1115 if (!dev->addr_len) {
1116 /* First case: device needs no address. */
1117 lladdr = neigh->ha;
1118 } else if (lladdr) {
1119		/* Second case: if something is already cached
1120 and a new address is proposed:
1121 - compare new & old
1122 - if they are different, check override flag
1123 */
1124 if ((old & NUD_VALID) &&
1125 !memcmp(lladdr, neigh->ha, dev->addr_len))
1126 lladdr = neigh->ha;
1127 } else {
1128 /* No address is supplied; if we know something,
1129 use it, otherwise discard the request.
1130 */
1131 err = -EINVAL;
1132 if (!(old & NUD_VALID))
1133 goto out;
1134 lladdr = neigh->ha;
1135 }
1136
1137 if (new & NUD_CONNECTED)
1138 neigh->confirmed = jiffies;
1139 neigh->updated = jiffies;
1140
1141	/* If the entry was valid and the address has not changed,
1142	   do not change the entry state if the new one is STALE.
1143	 */
1144 err = 0;
1145 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1146 if (old & NUD_VALID) {
1147 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1148 update_isrouter = 0;
1149 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1150 (old & NUD_CONNECTED)) {
1151 lladdr = neigh->ha;
1152 new = NUD_STALE;
1153 } else
1154 goto out;
1155 } else {
1156 if (lladdr == neigh->ha && new == NUD_STALE &&
1157 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1158 (old & NUD_CONNECTED))
1159 )
1160 new = old;
1161 }
1162 }
1163
1164 if (new != old) {
1165 neigh_del_timer(neigh);
1166 if (new & NUD_IN_TIMER)
1167 neigh_add_timer(neigh, (jiffies +
1168 ((new & NUD_REACHABLE) ?
1169 neigh->parms->reachable_time :
1170 0)));
1171 neigh->nud_state = new;
1172 }
1173
1174 if (lladdr != neigh->ha) {
1175 write_seqlock(&neigh->ha_lock);
1176 memcpy(&neigh->ha, lladdr, dev->addr_len);
1177 write_sequnlock(&neigh->ha_lock);
1178 neigh_update_hhs(neigh);
1179 if (!(new & NUD_CONNECTED))
1180 neigh->confirmed = jiffies -
1181 (neigh->parms->base_reachable_time << 1);
1182 notify = 1;
1183 }
1184 if (new == old)
1185 goto out;
1186 if (new & NUD_CONNECTED)
1187 neigh_connect(neigh);
1188 else
1189 neigh_suspect(neigh);
1190 if (!(old & NUD_VALID)) {
1191 struct sk_buff *skb;
1192
1193 /* Again: avoid dead loop if something went wrong */
1194
1195 while (neigh->nud_state & NUD_VALID &&
1196 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197 struct dst_entry *dst = skb_dst(skb);
1198 struct neighbour *n2, *n1 = neigh;
1199 write_unlock_bh(&neigh->lock);
1200
1201 rcu_read_lock();
1202 /* On shaper/eql skb->dst->neighbour != neigh :( */
1203 if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
1204 n1 = n2;
1205 n1->output(n1, skb);
1206 rcu_read_unlock();
1207
1208 write_lock_bh(&neigh->lock);
1209 }
1210 skb_queue_purge(&neigh->arp_queue);
1211 neigh->arp_queue_len_bytes = 0;
1212 }
1213out:
1214 if (update_isrouter) {
1215 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1216 (neigh->flags | NTF_ROUTER) :
1217 (neigh->flags & ~NTF_ROUTER);
1218 }
1219 write_unlock_bh(&neigh->lock);
1220
1221 if (notify)
1222 neigh_update_notify(neigh);
1223
1224 return err;
1225}
1226EXPORT_SYMBOL(neigh_update);
1227
1228struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1229 u8 *lladdr, void *saddr,
1230 struct net_device *dev)
1231{
1232 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1233 lladdr || !dev->addr_len);
1234 if (neigh)
1235 neigh_update(neigh, lladdr, NUD_STALE,
1236 NEIGH_UPDATE_F_OVERRIDE);
1237 return neigh;
1238}
1239EXPORT_SYMBOL(neigh_event_ns);
1240
1241/* called with read_lock_bh(&n->lock); */
1242static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1243{
1244 struct net_device *dev = dst->dev;
1245 __be16 prot = dst->ops->protocol;
1246 struct hh_cache *hh = &n->hh;
1247
1248 write_lock_bh(&n->lock);
1249
1250 /* Only one thread can come in here and initialize the
1251 * hh_cache entry.
1252 */
1253 if (!hh->hh_len)
1254 dev->header_ops->cache(n, hh, prot);
1255
1256 write_unlock_bh(&n->lock);
1257}
1258
1259/* This function can be used in contexts where only the old dev_queue_xmit
1260 * worked, e.g. if you want to override the normal output path (eql, shaper)
1261 * but resolution has not been made yet.
1262 */
1263
1264int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1265{
1266 struct net_device *dev = skb->dev;
1267
1268 __skb_pull(skb, skb_network_offset(skb));
1269
1270 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1271 skb->len) < 0 &&
1272 dev->header_ops->rebuild(skb))
1273 return 0;
1274
1275 return dev_queue_xmit(skb);
1276}
1277EXPORT_SYMBOL(neigh_compat_output);
1278
1279/* Slow and careful. */
1280
1281int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1282{
1283 struct dst_entry *dst = skb_dst(skb);
1284 int rc = 0;
1285
1286 if (!dst)
1287 goto discard;
1288
1289 __skb_pull(skb, skb_network_offset(skb));
1290
1291 if (!neigh_event_send(neigh, skb)) {
1292 int err;
1293 struct net_device *dev = neigh->dev;
1294 unsigned int seq;
1295
1296 if (dev->header_ops->cache && !neigh->hh.hh_len)
1297 neigh_hh_init(neigh, dst);
1298
1299 do {
1300 seq = read_seqbegin(&neigh->ha_lock);
1301 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1302 neigh->ha, NULL, skb->len);
1303 } while (read_seqretry(&neigh->ha_lock, seq));
1304
1305 if (err >= 0)
1306 rc = dev_queue_xmit(skb);
1307 else
1308 goto out_kfree_skb;
1309 }
1310out:
1311 return rc;
1312discard:
1313	neigh_dbg(1, "neigh_resolve_output: dst=%p neigh=%p\n",
1314		  dst, neigh);
1315out_kfree_skb:
1316 rc = -EINVAL;
1317 kfree_skb(skb);
1318 goto out;
1319}
1320EXPORT_SYMBOL(neigh_resolve_output);
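
/* How these output methods are wired together (a sketch of the state
 * machine, not new behaviour): neigh_connect() points neigh->output at
 * ops->connected_output and neigh_suspect() points it back at
 * ops->output, so a freshly confirmed entry uses the fast
 * cached-header path of neigh_connected_output() below, while an
 * unresolved or suspect entry keeps going through the slow and careful
 * neigh_resolve_output() above.
 */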
1321
1322/* As fast as possible without hh cache */
1323
1324int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1325{
1326 struct net_device *dev = neigh->dev;
1327 unsigned int seq;
1328 int err;
1329
1330 __skb_pull(skb, skb_network_offset(skb));
1331
1332 do {
1333 seq = read_seqbegin(&neigh->ha_lock);
1334 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1335 neigh->ha, NULL, skb->len);
1336 } while (read_seqretry(&neigh->ha_lock, seq));
1337
1338 if (err >= 0)
1339 err = dev_queue_xmit(skb);
1340 else {
1341 err = -EINVAL;
1342 kfree_skb(skb);
1343 }
1344 return err;
1345}
1346EXPORT_SYMBOL(neigh_connected_output);
1347
1348int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1349{
1350 return dev_queue_xmit(skb);
1351}
1352EXPORT_SYMBOL(neigh_direct_output);
1353
1354static void neigh_proxy_process(unsigned long arg)
1355{
1356 struct neigh_table *tbl = (struct neigh_table *)arg;
1357 long sched_next = 0;
1358 unsigned long now = jiffies;
1359 struct sk_buff *skb, *n;
1360
1361 spin_lock(&tbl->proxy_queue.lock);
1362
1363 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1364 long tdif = NEIGH_CB(skb)->sched_next - now;
1365
1366 if (tdif <= 0) {
1367 struct net_device *dev = skb->dev;
1368
1369 __skb_unlink(skb, &tbl->proxy_queue);
1370 if (tbl->proxy_redo && netif_running(dev)) {
1371 rcu_read_lock();
1372 tbl->proxy_redo(skb);
1373 rcu_read_unlock();
1374 } else {
1375 kfree_skb(skb);
1376 }
1377
1378 dev_put(dev);
1379 } else if (!sched_next || tdif < sched_next)
1380 sched_next = tdif;
1381 }
1382 del_timer(&tbl->proxy_timer);
1383 if (sched_next)
1384 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1385 spin_unlock(&tbl->proxy_queue.lock);
1386}
1387
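/* Queue a packet for delayed proxy handling. The random offset spreads
 * proxy replies over the interval [now, now + proxy_delay); with the
 * usual ARP default proxy_delay of 0.8 s, for instance, a reply leaves
 * at most 0.8 s after the request. neigh_proxy_process() above then
 * redoes the queued skbs when the proxy_timer fires.
 */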
1388void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1389 struct sk_buff *skb)
1390{
1391 unsigned long now = jiffies;
1392	unsigned long sched_next = now + (prandom_u32() % p->proxy_delay);
1393
1394 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1395 kfree_skb(skb);
1396 return;
1397 }
1398
1399 NEIGH_CB(skb)->sched_next = sched_next;
1400 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1401
1402 spin_lock(&tbl->proxy_queue.lock);
1403 if (del_timer(&tbl->proxy_timer)) {
1404 if (time_before(tbl->proxy_timer.expires, sched_next))
1405 sched_next = tbl->proxy_timer.expires;
1406 }
1407 skb_dst_drop(skb);
1408 dev_hold(skb->dev);
1409 __skb_queue_tail(&tbl->proxy_queue, skb);
1410 mod_timer(&tbl->proxy_timer, sched_next);
1411 spin_unlock(&tbl->proxy_queue.lock);
1412}
1413EXPORT_SYMBOL(pneigh_enqueue);
1414
1415static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1416 struct net *net, int ifindex)
1417{
1418 struct neigh_parms *p;
1419
1420 for (p = &tbl->parms; p; p = p->next) {
1421 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1422 (!p->dev && !ifindex))
1423 return p;
1424 }
1425
1426 return NULL;
1427}
1428
1429struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1430 struct neigh_table *tbl)
1431{
1432 struct neigh_parms *p, *ref;
1433 struct net *net = dev_net(dev);
1434 const struct net_device_ops *ops = dev->netdev_ops;
1435
1436 ref = lookup_neigh_parms(tbl, net, 0);
1437 if (!ref)
1438 return NULL;
1439
1440 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1441 if (p) {
1442 p->tbl = tbl;
1443 atomic_set(&p->refcnt, 1);
1444 p->reachable_time =
1445 neigh_rand_reach_time(p->base_reachable_time);
1446
1447 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1448 kfree(p);
1449 return NULL;
1450 }
1451
1452 dev_hold(dev);
1453 p->dev = dev;
1454 write_pnet(&p->net, hold_net(net));
1455 p->sysctl_table = NULL;
1456 write_lock_bh(&tbl->lock);
1457 p->next = tbl->parms.next;
1458 tbl->parms.next = p;
1459 write_unlock_bh(&tbl->lock);
1460 }
1461 return p;
1462}
1463EXPORT_SYMBOL(neigh_parms_alloc);
1464
1465static void neigh_rcu_free_parms(struct rcu_head *head)
1466{
1467 struct neigh_parms *parms =
1468 container_of(head, struct neigh_parms, rcu_head);
1469
1470 neigh_parms_put(parms);
1471}
1472
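/* Unlink a per-device parms block from the table. Teardown is
 * deliberately two-stage: the block is unlinked and marked dead under
 * tbl->lock, neigh_rcu_free_parms() drops the table's reference after
 * a grace period, and neigh_parms_destroy() frees it only once the
 * last neighbour entry still holding the parms has released it.
 */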
1473void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1474{
1475 struct neigh_parms **p;
1476
1477 if (!parms || parms == &tbl->parms)
1478 return;
1479 write_lock_bh(&tbl->lock);
1480 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1481 if (*p == parms) {
1482 *p = parms->next;
1483 parms->dead = 1;
1484 write_unlock_bh(&tbl->lock);
1485 if (parms->dev)
1486 dev_put(parms->dev);
1487 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1488 return;
1489 }
1490 }
1491 write_unlock_bh(&tbl->lock);
1492	neigh_dbg(1, "neigh_parms_release: not found\n");
1493}
1494EXPORT_SYMBOL(neigh_parms_release);
1495
1496static void neigh_parms_destroy(struct neigh_parms *parms)
1497{
1498 release_net(neigh_parms_net(parms));
1499 kfree(parms);
1500}
1501
1502static struct lock_class_key neigh_table_proxy_queue_class;
1503
1504static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1505{
1506 unsigned long now = jiffies;
1507 unsigned long phsize;
1508
1509 write_pnet(&tbl->parms.net, &init_net);
1510 atomic_set(&tbl->parms.refcnt, 1);
1511 tbl->parms.reachable_time =
1512 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1513
1514 tbl->stats = alloc_percpu(struct neigh_statistics);
1515 if (!tbl->stats)
1516 panic("cannot create neighbour cache statistics");
1517
1518#ifdef CONFIG_PROC_FS
1519 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1520 &neigh_stat_seq_fops, tbl))
1521 panic("cannot create neighbour proc dir entry");
1522#endif
1523
1524 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1525
1526 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1527 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1528
1529 if (!tbl->nht || !tbl->phash_buckets)
1530 panic("cannot allocate neighbour cache hashes");
1531
1532 rwlock_init(&tbl->lock);
1533 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1534 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1535 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1536 skb_queue_head_init_class(&tbl->proxy_queue,
1537 &neigh_table_proxy_queue_class);
1538
1539 tbl->last_flush = now;
1540 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1541}
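
/* Note: neigh_hash_alloc(3) above starts every table with 1 << 3 = 8
 * hash buckets; the table is grown (doubled) from neigh_create() once
 * the number of entries exceeds the number of buckets.
 */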
1542
1543void neigh_table_init(struct neigh_table *tbl)
1544{
1545 struct neigh_table *tmp;
1546
1547 neigh_table_init_no_netlink(tbl);
1548 write_lock(&neigh_tbl_lock);
1549 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1550 if (tmp->family == tbl->family)
1551 break;
1552 }
1553 tbl->next = neigh_tables;
1554 neigh_tables = tbl;
1555 write_unlock(&neigh_tbl_lock);
1556
1557 if (unlikely(tmp)) {
1558 pr_err("Registering multiple tables for family %d\n",
1559 tbl->family);
1560 dump_stack();
1561 }
1562}
1563EXPORT_SYMBOL(neigh_table_init);
1564
1565int neigh_table_clear(struct neigh_table *tbl)
1566{
1567 struct neigh_table **tp;
1568
1569	/* This is not clean... Fix it so that the IPv6 module can be unloaded safely. */
1570 cancel_delayed_work_sync(&tbl->gc_work);
1571 del_timer_sync(&tbl->proxy_timer);
1572 pneigh_queue_purge(&tbl->proxy_queue);
1573 neigh_ifdown(tbl, NULL);
1574 if (atomic_read(&tbl->entries))
1575 pr_crit("neighbour leakage\n");
1576 write_lock(&neigh_tbl_lock);
1577 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1578 if (*tp == tbl) {
1579 *tp = tbl->next;
1580 break;
1581 }
1582 }
1583 write_unlock(&neigh_tbl_lock);
1584
1585 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1586 neigh_hash_free_rcu);
1587 tbl->nht = NULL;
1588
1589 kfree(tbl->phash_buckets);
1590 tbl->phash_buckets = NULL;
1591
1592 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1593
1594 free_percpu(tbl->stats);
1595 tbl->stats = NULL;
1596
1597 return 0;
1598}
1599EXPORT_SYMBOL(neigh_table_clear);
1600
1601static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1602{
1603 struct net *net = sock_net(skb->sk);
1604 struct ndmsg *ndm;
1605 struct nlattr *dst_attr;
1606 struct neigh_table *tbl;
1607 struct net_device *dev = NULL;
1608 int err = -EINVAL;
1609
1610 ASSERT_RTNL();
1611 if (nlmsg_len(nlh) < sizeof(*ndm))
1612 goto out;
1613
1614 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1615 if (dst_attr == NULL)
1616 goto out;
1617
1618 ndm = nlmsg_data(nlh);
1619 if (ndm->ndm_ifindex) {
1620 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1621 if (dev == NULL) {
1622 err = -ENODEV;
1623 goto out;
1624 }
1625 }
1626
1627 read_lock(&neigh_tbl_lock);
1628 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1629 struct neighbour *neigh;
1630
1631 if (tbl->family != ndm->ndm_family)
1632 continue;
1633 read_unlock(&neigh_tbl_lock);
1634
1635 if (nla_len(dst_attr) < tbl->key_len)
1636 goto out;
1637
1638 if (ndm->ndm_flags & NTF_PROXY) {
1639 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1640 goto out;
1641 }
1642
1643 if (dev == NULL)
1644 goto out;
1645
1646 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1647 if (neigh == NULL) {
1648 err = -ENOENT;
1649 goto out;
1650 }
1651
1652 err = neigh_update(neigh, NULL, NUD_FAILED,
1653 NEIGH_UPDATE_F_OVERRIDE |
1654 NEIGH_UPDATE_F_ADMIN);
1655 neigh_release(neigh);
1656 goto out;
1657 }
1658 read_unlock(&neigh_tbl_lock);
1659 err = -EAFNOSUPPORT;
1660
1661out:
1662 return err;
1663}
1664
1665static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1666{
1667 struct net *net = sock_net(skb->sk);
1668 struct ndmsg *ndm;
1669 struct nlattr *tb[NDA_MAX+1];
1670 struct neigh_table *tbl;
1671 struct net_device *dev = NULL;
1672 int err;
1673
1674 ASSERT_RTNL();
1675 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1676 if (err < 0)
1677 goto out;
1678
1679 err = -EINVAL;
1680 if (tb[NDA_DST] == NULL)
1681 goto out;
1682
1683 ndm = nlmsg_data(nlh);
1684 if (ndm->ndm_ifindex) {
1685 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1686 if (dev == NULL) {
1687 err = -ENODEV;
1688 goto out;
1689 }
1690
1691 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1692 goto out;
1693 }
1694
1695 read_lock(&neigh_tbl_lock);
1696 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1697 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1698 struct neighbour *neigh;
1699 void *dst, *lladdr;
1700
1701 if (tbl->family != ndm->ndm_family)
1702 continue;
1703 read_unlock(&neigh_tbl_lock);
1704
1705 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1706 goto out;
1707 dst = nla_data(tb[NDA_DST]);
1708 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1709
1710 if (ndm->ndm_flags & NTF_PROXY) {
1711 struct pneigh_entry *pn;
1712
1713 err = -ENOBUFS;
1714 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1715 if (pn) {
1716 pn->flags = ndm->ndm_flags;
1717 err = 0;
1718 }
1719 goto out;
1720 }
1721
1722 if (dev == NULL)
1723 goto out;
1724
1725 neigh = neigh_lookup(tbl, dst, dev);
1726 if (neigh == NULL) {
1727 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1728 err = -ENOENT;
1729 goto out;
1730 }
1731
1732 neigh = __neigh_lookup_errno(tbl, dst, dev);
1733 if (IS_ERR(neigh)) {
1734 err = PTR_ERR(neigh);
1735 goto out;
1736 }
1737 } else {
1738 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1739 err = -EEXIST;
1740 neigh_release(neigh);
1741 goto out;
1742 }
1743
1744 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1745 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1746 }
1747
1748 if (ndm->ndm_flags & NTF_USE) {
1749 neigh_event_send(neigh, NULL);
1750 err = 0;
1751 } else
1752 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1753 neigh_release(neigh);
1754 goto out;
1755 }
1756
1757 read_unlock(&neigh_tbl_lock);
1758 err = -EAFNOSUPPORT;
1759out:
1760 return err;
1761}
1762
1763static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1764{
1765 struct nlattr *nest;
1766
1767 nest = nla_nest_start(skb, NDTA_PARMS);
1768 if (nest == NULL)
1769 return -ENOBUFS;
1770
1771 if ((parms->dev &&
1772 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1773 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1774 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1775	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1776 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1777 DIV_ROUND_UP(parms->queue_len_bytes,
1778 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1779 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1780 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1781 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1782 nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1783 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1784 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1785 parms->base_reachable_time) ||
1786 nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1787 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1788 parms->delay_probe_time) ||
1789 nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1790 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1791 nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1792 nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1793 goto nla_put_failure;
1794 return nla_nest_end(skb, nest);
1795
1796nla_put_failure:
1797 nla_nest_cancel(skb, nest);
1798 return -EMSGSIZE;
1799}
1800
1801static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1802 u32 pid, u32 seq, int type, int flags)
1803{
1804 struct nlmsghdr *nlh;
1805 struct ndtmsg *ndtmsg;
1806
1807 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1808 if (nlh == NULL)
1809 return -EMSGSIZE;
1810
1811 ndtmsg = nlmsg_data(nlh);
1812
1813 read_lock_bh(&tbl->lock);
1814 ndtmsg->ndtm_family = tbl->family;
1815 ndtmsg->ndtm_pad1 = 0;
1816 ndtmsg->ndtm_pad2 = 0;
1817
1818 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1819 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1820 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1821 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1822 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1823 goto nla_put_failure;
1824 {
1825 unsigned long now = jiffies;
1826 unsigned int flush_delta = now - tbl->last_flush;
1827 unsigned int rand_delta = now - tbl->last_rand;
1828 struct neigh_hash_table *nht;
1829 struct ndt_config ndc = {
1830 .ndtc_key_len = tbl->key_len,
1831 .ndtc_entry_size = tbl->entry_size,
1832 .ndtc_entries = atomic_read(&tbl->entries),
1833 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1834 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1835 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1836 };
1837
1838 rcu_read_lock_bh();
1839 nht = rcu_dereference_bh(tbl->nht);
1840 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1841 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1842 rcu_read_unlock_bh();
1843
1844 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1845 goto nla_put_failure;
1846 }
1847
1848 {
1849 int cpu;
1850 struct ndt_stats ndst;
1851
1852 memset(&ndst, 0, sizeof(ndst));
1853
1854 for_each_possible_cpu(cpu) {
1855 struct neigh_statistics *st;
1856
1857 st = per_cpu_ptr(tbl->stats, cpu);
1858 ndst.ndts_allocs += st->allocs;
1859 ndst.ndts_destroys += st->destroys;
1860 ndst.ndts_hash_grows += st->hash_grows;
1861 ndst.ndts_res_failed += st->res_failed;
1862 ndst.ndts_lookups += st->lookups;
1863 ndst.ndts_hits += st->hits;
1864 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1865 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1866 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1867 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1868 }
1869
1870 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1871 goto nla_put_failure;
1872 }
1873
1874 BUG_ON(tbl->parms.dev);
1875 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1876 goto nla_put_failure;
1877
1878 read_unlock_bh(&tbl->lock);
1879 return nlmsg_end(skb, nlh);
1880
1881nla_put_failure:
1882 read_unlock_bh(&tbl->lock);
1883 nlmsg_cancel(skb, nlh);
1884 return -EMSGSIZE;
1885}
1886
1887static int neightbl_fill_param_info(struct sk_buff *skb,
1888 struct neigh_table *tbl,
1889 struct neigh_parms *parms,
1890 u32 pid, u32 seq, int type,
1891 unsigned int flags)
1892{
1893 struct ndtmsg *ndtmsg;
1894 struct nlmsghdr *nlh;
1895
1896 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1897 if (nlh == NULL)
1898 return -EMSGSIZE;
1899
1900 ndtmsg = nlmsg_data(nlh);
1901
1902 read_lock_bh(&tbl->lock);
1903 ndtmsg->ndtm_family = tbl->family;
1904 ndtmsg->ndtm_pad1 = 0;
1905 ndtmsg->ndtm_pad2 = 0;
1906
1907 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1908 neightbl_fill_parms(skb, parms) < 0)
1909 goto errout;
1910
1911 read_unlock_bh(&tbl->lock);
1912 return nlmsg_end(skb, nlh);
1913errout:
1914 read_unlock_bh(&tbl->lock);
1915 nlmsg_cancel(skb, nlh);
1916 return -EMSGSIZE;
1917}
1918
1919static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1920 [NDTA_NAME] = { .type = NLA_STRING },
1921 [NDTA_THRESH1] = { .type = NLA_U32 },
1922 [NDTA_THRESH2] = { .type = NLA_U32 },
1923 [NDTA_THRESH3] = { .type = NLA_U32 },
1924 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1925 [NDTA_PARMS] = { .type = NLA_NESTED },
1926};
1927
1928static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1929 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1930 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1931 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1932 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1933 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1934 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1935 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1936 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1937 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1938 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1939 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1940 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1941 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1942};
1943
1944static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1945{
1946 struct net *net = sock_net(skb->sk);
1947 struct neigh_table *tbl;
1948 struct ndtmsg *ndtmsg;
1949 struct nlattr *tb[NDTA_MAX+1];
1950 int err;
1951
1952 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1953 nl_neightbl_policy);
1954 if (err < 0)
1955 goto errout;
1956
1957 if (tb[NDTA_NAME] == NULL) {
1958 err = -EINVAL;
1959 goto errout;
1960 }
1961
1962 ndtmsg = nlmsg_data(nlh);
1963 read_lock(&neigh_tbl_lock);
1964 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1965 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1966 continue;
1967
1968 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1969 break;
1970 }
1971
1972 if (tbl == NULL) {
1973 err = -ENOENT;
1974 goto errout_locked;
1975 }
1976
1977 /*
1978 * We acquire tbl->lock to be nice to the periodic timers and
1979 * make sure they always see a consistent set of values.
1980 */
1981 write_lock_bh(&tbl->lock);
1982
1983 if (tb[NDTA_PARMS]) {
1984 struct nlattr *tbp[NDTPA_MAX+1];
1985 struct neigh_parms *p;
1986 int i, ifindex = 0;
1987
1988 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1989 nl_ntbl_parm_policy);
1990 if (err < 0)
1991 goto errout_tbl_lock;
1992
1993 if (tbp[NDTPA_IFINDEX])
1994 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1995
1996 p = lookup_neigh_parms(tbl, net, ifindex);
1997 if (p == NULL) {
1998 err = -ENOENT;
1999 goto errout_tbl_lock;
2000 }
2001
2002 for (i = 1; i <= NDTPA_MAX; i++) {
2003 if (tbp[i] == NULL)
2004 continue;
2005
2006 switch (i) {
2007 case NDTPA_QUEUE_LEN:
2008 p->queue_len_bytes = nla_get_u32(tbp[i]) *
2009 SKB_TRUESIZE(ETH_FRAME_LEN);
2010 break;
2011 case NDTPA_QUEUE_LENBYTES:
2012 p->queue_len_bytes = nla_get_u32(tbp[i]);
2013 break;
2014 case NDTPA_PROXY_QLEN:
2015 p->proxy_qlen = nla_get_u32(tbp[i]);
2016 break;
2017 case NDTPA_APP_PROBES:
2018 p->app_probes = nla_get_u32(tbp[i]);
2019 break;
2020 case NDTPA_UCAST_PROBES:
2021 p->ucast_probes = nla_get_u32(tbp[i]);
2022 break;
2023 case NDTPA_MCAST_PROBES:
2024 p->mcast_probes = nla_get_u32(tbp[i]);
2025 break;
2026 case NDTPA_BASE_REACHABLE_TIME:
2027 p->base_reachable_time = nla_get_msecs(tbp[i]);
2028 break;
2029 case NDTPA_GC_STALETIME:
2030 p->gc_staletime = nla_get_msecs(tbp[i]);
2031 break;
2032 case NDTPA_DELAY_PROBE_TIME:
2033 p->delay_probe_time = nla_get_msecs(tbp[i]);
2034 break;
2035 case NDTPA_RETRANS_TIME:
2036 p->retrans_time = nla_get_msecs(tbp[i]);
2037 break;
2038 case NDTPA_ANYCAST_DELAY:
2039 p->anycast_delay = nla_get_msecs(tbp[i]);
2040 break;
2041 case NDTPA_PROXY_DELAY:
2042 p->proxy_delay = nla_get_msecs(tbp[i]);
2043 break;
2044 case NDTPA_LOCKTIME:
2045 p->locktime = nla_get_msecs(tbp[i]);
2046 break;
2047 }
2048 }
2049 }
2050
2051 if (tb[NDTA_THRESH1])
2052 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2053
2054 if (tb[NDTA_THRESH2])
2055 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2056
2057 if (tb[NDTA_THRESH3])
2058 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2059
2060 if (tb[NDTA_GC_INTERVAL])
2061 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2062
2063 err = 0;
2064
2065errout_tbl_lock:
2066 write_unlock_bh(&tbl->lock);
2067errout_locked:
2068 read_unlock(&neigh_tbl_lock);
2069errout:
2070 return err;
2071}
2072
2073static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2074{
2075 struct net *net = sock_net(skb->sk);
2076 int family, tidx, nidx = 0;
2077 int tbl_skip = cb->args[0];
2078 int neigh_skip = cb->args[1];
2079 struct neigh_table *tbl;
2080
2081 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2082
2083 read_lock(&neigh_tbl_lock);
2084 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2085 struct neigh_parms *p;
2086
2087 if (tidx < tbl_skip || (family && tbl->family != family))
2088 continue;
2089
2090 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2091 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2092 NLM_F_MULTI) <= 0)
2093 break;
2094
2095 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2096 if (!net_eq(neigh_parms_net(p), net))
2097 continue;
2098
2099 if (nidx < neigh_skip)
2100 goto next;
2101
2102 if (neightbl_fill_param_info(skb, tbl, p,
2103 NETLINK_CB(cb->skb).pid,
2104 cb->nlh->nlmsg_seq,
2105 RTM_NEWNEIGHTBL,
2106 NLM_F_MULTI) <= 0)
2107 goto out;
2108 next:
2109 nidx++;
2110 }
2111
2112 neigh_skip = 0;
2113 }
2114out:
2115 read_unlock(&neigh_tbl_lock);
2116 cb->args[0] = tidx;
2117 cb->args[1] = nidx;
2118
2119 return skb->len;
2120}
2121
2122static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2123 u32 pid, u32 seq, int type, unsigned int flags)
2124{
2125 unsigned long now = jiffies;
2126 struct nda_cacheinfo ci;
2127 struct nlmsghdr *nlh;
2128 struct ndmsg *ndm;
2129
2130 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2131 if (nlh == NULL)
2132 return -EMSGSIZE;
2133
2134 ndm = nlmsg_data(nlh);
2135 ndm->ndm_family = neigh->ops->family;
2136 ndm->ndm_pad1 = 0;
2137 ndm->ndm_pad2 = 0;
2138 ndm->ndm_flags = neigh->flags;
2139 ndm->ndm_type = neigh->type;
2140 ndm->ndm_ifindex = neigh->dev->ifindex;
2141
2142 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2143 goto nla_put_failure;
2144
2145 read_lock_bh(&neigh->lock);
2146 ndm->ndm_state = neigh->nud_state;
2147 if (neigh->nud_state & NUD_VALID) {
2148 char haddr[MAX_ADDR_LEN];
2149
2150 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2151 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2152 read_unlock_bh(&neigh->lock);
2153 goto nla_put_failure;
2154 }
2155 }
2156
2157 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2158 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2159 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2160 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2161 read_unlock_bh(&neigh->lock);
2162
2163 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2164 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2165 goto nla_put_failure;
2166
2167 return nlmsg_end(skb, nlh);
2168
2169nla_put_failure:
2170 nlmsg_cancel(skb, nlh);
2171 return -EMSGSIZE;
2172}
2173
2174static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2175 u32 pid, u32 seq, int type, unsigned int flags,
2176 struct neigh_table *tbl)
2177{
2178 struct nlmsghdr *nlh;
2179 struct ndmsg *ndm;
2180
2181 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2182 if (nlh == NULL)
2183 return -EMSGSIZE;
2184
2185 ndm = nlmsg_data(nlh);
2186 ndm->ndm_family = tbl->family;
2187 ndm->ndm_pad1 = 0;
2188 ndm->ndm_pad2 = 0;
2189 ndm->ndm_flags = pn->flags | NTF_PROXY;
2190 ndm->ndm_type = NDA_DST;
2191 ndm->ndm_ifindex = pn->dev->ifindex;
2192 ndm->ndm_state = NUD_NONE;
2193
2194 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2195 goto nla_put_failure;
2196
2197 return nlmsg_end(skb, nlh);
2198
2199nla_put_failure:
2200 nlmsg_cancel(skb, nlh);
2201 return -EMSGSIZE;
2202}
2203
2204static void neigh_update_notify(struct neighbour *neigh)
2205{
2206 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2207 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2208}
2209
2210static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2211 struct netlink_callback *cb)
2212{
2213 struct net *net = sock_net(skb->sk);
2214 struct neighbour *n;
2215 int rc, h, s_h = cb->args[1];
2216 int idx, s_idx = idx = cb->args[2];
2217 struct neigh_hash_table *nht;
2218
2219 rcu_read_lock_bh();
2220 nht = rcu_dereference_bh(tbl->nht);
2221
2222 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2223 if (h > s_h)
2224 s_idx = 0;
2225 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2226 n != NULL;
2227 n = rcu_dereference_bh(n->next)) {
2228 if (!net_eq(dev_net(n->dev), net))
2229 continue;
2230 if (idx < s_idx)
2231 goto next;
2232 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2233 cb->nlh->nlmsg_seq,
2234 RTM_NEWNEIGH,
2235 NLM_F_MULTI) <= 0) {
2236 rc = -1;
2237 goto out;
2238 }
2239next:
2240 idx++;
2241 }
2242 }
2243 rc = skb->len;
2244out:
2245 rcu_read_unlock_bh();
2246 cb->args[1] = h;
2247 cb->args[2] = idx;
2248 return rc;
2249}
2250
2251static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2252 struct netlink_callback *cb)
2253{
2254 struct pneigh_entry *n;
2255 struct net *net = sock_net(skb->sk);
2256 int rc, h, s_h = cb->args[3];
2257 int idx, s_idx = idx = cb->args[4];
2258
2259 read_lock_bh(&tbl->lock);
2260
2261 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2262 if (h > s_h)
2263 s_idx = 0;
2264 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2265 if (dev_net(n->dev) != net)
2266 continue;
2267 if (idx < s_idx)
2268 goto next;
2269 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2270 cb->nlh->nlmsg_seq,
2271 RTM_NEWNEIGH,
2272 NLM_F_MULTI, tbl) <= 0) {
2273 read_unlock_bh(&tbl->lock);
2274 rc = -1;
2275 goto out;
2276 }
2277 next:
2278 idx++;
2279 }
2280 }
2281
2282 read_unlock_bh(&tbl->lock);
2283 rc = skb->len;
2284out:
2285 cb->args[3] = h;
2286 cb->args[4] = idx;
2287 return rc;
2288
2289}
2290
2291static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2292{
2293 struct neigh_table *tbl;
2294 int t, family, s_t;
2295 int proxy = 0;
2296 int err;
2297
2298 read_lock(&neigh_tbl_lock);
2299 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2300
2301	/* check for the presence of a full ndmsg structure; the family
2302	 * member is at the same offset in both structures
2303	 */
2304 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2305 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2306 proxy = 1;
2307
2308 s_t = cb->args[0];
2309
2310 for (tbl = neigh_tables, t = 0; tbl;
2311 tbl = tbl->next, t++) {
2312 if (t < s_t || (family && tbl->family != family))
2313 continue;
2314 if (t > s_t)
2315 memset(&cb->args[1], 0, sizeof(cb->args) -
2316 sizeof(cb->args[0]));
2317 if (proxy)
2318 err = pneigh_dump_table(tbl, skb, cb);
2319 else
2320 err = neigh_dump_table(tbl, skb, cb);
2321 if (err < 0)
2322 break;
2323 }
2324 read_unlock(&neigh_tbl_lock);
2325
2326 cb->args[0] = t;
2327 return skb->len;
2328}
2329
2330void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2331{
2332 int chain;
2333 struct neigh_hash_table *nht;
2334
2335 rcu_read_lock_bh();
2336 nht = rcu_dereference_bh(tbl->nht);
2337
2338 read_lock(&tbl->lock); /* avoid resizes */
2339 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2340 struct neighbour *n;
2341
2342 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2343 n != NULL;
2344 n = rcu_dereference_bh(n->next))
2345 cb(n, cookie);
2346 }
2347 read_unlock(&tbl->lock);
2348 rcu_read_unlock_bh();
2349}
2350EXPORT_SYMBOL(neigh_for_each);
2351
2352/* The tbl->lock must be held as a writer and BH disabled. */
2353void __neigh_for_each_release(struct neigh_table *tbl,
2354 int (*cb)(struct neighbour *))
2355{
2356 int chain;
2357 struct neigh_hash_table *nht;
2358
2359 nht = rcu_dereference_protected(tbl->nht,
2360 lockdep_is_held(&tbl->lock));
2361 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2362 struct neighbour *n;
2363 struct neighbour __rcu **np;
2364
2365 np = &nht->hash_buckets[chain];
2366 while ((n = rcu_dereference_protected(*np,
2367 lockdep_is_held(&tbl->lock))) != NULL) {
2368 int release;
2369
2370 write_lock(&n->lock);
2371 release = cb(n);
2372 if (release) {
2373 rcu_assign_pointer(*np,
2374 rcu_dereference_protected(n->next,
2375 lockdep_is_held(&tbl->lock)));
2376 n->dead = 1;
2377 } else
2378 np = &n->next;
2379 write_unlock(&n->lock);
2380 if (release)
2381 neigh_cleanup_and_release(n);
2382 }
2383 }
2384}
2385EXPORT_SYMBOL(__neigh_for_each_release);
2386
2387#ifdef CONFIG_PROC_FS
2388
2389static struct neighbour *neigh_get_first(struct seq_file *seq)
2390{
2391 struct neigh_seq_state *state = seq->private;
2392 struct net *net = seq_file_net(seq);
2393 struct neigh_hash_table *nht = state->nht;
2394 struct neighbour *n = NULL;
2395 int bucket = state->bucket;
2396
2397 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2398 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2399 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2400
2401 while (n) {
2402 if (!net_eq(dev_net(n->dev), net))
2403 goto next;
2404 if (state->neigh_sub_iter) {
2405 loff_t fakep = 0;
2406 void *v;
2407
2408 v = state->neigh_sub_iter(state, n, &fakep);
2409 if (!v)
2410 goto next;
2411 }
2412 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2413 break;
2414 if (n->nud_state & ~NUD_NOARP)
2415 break;
2416next:
2417 n = rcu_dereference_bh(n->next);
2418 }
2419
2420 if (n)
2421 break;
2422 }
2423 state->bucket = bucket;
2424
2425 return n;
2426}
2427
2428static struct neighbour *neigh_get_next(struct seq_file *seq,
2429 struct neighbour *n,
2430 loff_t *pos)
2431{
2432 struct neigh_seq_state *state = seq->private;
2433 struct net *net = seq_file_net(seq);
2434 struct neigh_hash_table *nht = state->nht;
2435
2436 if (state->neigh_sub_iter) {
2437 void *v = state->neigh_sub_iter(state, n, pos);
2438 if (v)
2439 return n;
2440 }
2441 n = rcu_dereference_bh(n->next);
2442
2443 while (1) {
2444 while (n) {
2445 if (!net_eq(dev_net(n->dev), net))
2446 goto next;
2447 if (state->neigh_sub_iter) {
2448 void *v = state->neigh_sub_iter(state, n, pos);
2449 if (v)
2450 return n;
2451 goto next;
2452 }
2453 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2454 break;
2455
2456 if (n->nud_state & ~NUD_NOARP)
2457 break;
2458next:
2459 n = rcu_dereference_bh(n->next);
2460 }
2461
2462 if (n)
2463 break;
2464
2465 if (++state->bucket >= (1 << nht->hash_shift))
2466 break;
2467
2468 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2469 }
2470
2471 if (n && pos)
2472 --(*pos);
2473 return n;
2474}
2475
2476static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2477{
2478 struct neighbour *n = neigh_get_first(seq);
2479
2480 if (n) {
2481 --(*pos);
2482 while (*pos) {
2483 n = neigh_get_next(seq, n, pos);
2484 if (!n)
2485 break;
2486 }
2487 }
2488 return *pos ? NULL : n;
2489}
2490
2491static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2492{
2493 struct neigh_seq_state *state = seq->private;
2494 struct net *net = seq_file_net(seq);
2495 struct neigh_table *tbl = state->tbl;
2496 struct pneigh_entry *pn = NULL;
2497 int bucket = state->bucket;
2498
2499 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2500 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2501 pn = tbl->phash_buckets[bucket];
2502 while (pn && !net_eq(pneigh_net(pn), net))
2503 pn = pn->next;
2504 if (pn)
2505 break;
2506 }
2507 state->bucket = bucket;
2508
2509 return pn;
2510}
2511
2512static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2513 struct pneigh_entry *pn,
2514 loff_t *pos)
2515{
2516 struct neigh_seq_state *state = seq->private;
2517 struct net *net = seq_file_net(seq);
2518 struct neigh_table *tbl = state->tbl;
2519
2520 do {
2521 pn = pn->next;
2522 } while (pn && !net_eq(pneigh_net(pn), net));
2523
2524 while (!pn) {
2525 if (++state->bucket > PNEIGH_HASHMASK)
2526 break;
2527 pn = tbl->phash_buckets[state->bucket];
2528 while (pn && !net_eq(pneigh_net(pn), net))
2529 pn = pn->next;
2530 if (pn)
2531 break;
2532 }
2533
2534 if (pn && pos)
2535 --(*pos);
2536
2537 return pn;
2538}
2539
2540static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2541{
2542 struct pneigh_entry *pn = pneigh_get_first(seq);
2543
2544 if (pn) {
2545 --(*pos);
2546 while (*pos) {
2547 pn = pneigh_get_next(seq, pn, pos);
2548 if (!pn)
2549 break;
2550 }
2551 }
2552 return *pos ? NULL : pn;
2553}
2554
2555static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2556{
2557 struct neigh_seq_state *state = seq->private;
2558 void *rc;
2559 loff_t idxpos = *pos;
2560
2561 rc = neigh_get_idx(seq, &idxpos);
2562 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2563 rc = pneigh_get_idx(seq, &idxpos);
2564
2565 return rc;
2566}
2567
2568void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2569 __acquires(rcu_bh)
2570{
2571 struct neigh_seq_state *state = seq->private;
2572
2573 state->tbl = tbl;
2574 state->bucket = 0;
2575 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2576
2577 rcu_read_lock_bh();
2578 state->nht = rcu_dereference_bh(tbl->nht);
2579
2580 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2581}
2582EXPORT_SYMBOL(neigh_seq_start);
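
/* Illustrative use: a protocol's /proc seq_file plugs into these
 * iterators, e.g. ARP's arp_seq_start() is essentially
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *
 * with neigh_seq_next()/neigh_seq_stop() used unchanged for the
 * remaining seq_operations.
 */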
2583
2584void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2585{
2586 struct neigh_seq_state *state;
2587 void *rc;
2588
2589 if (v == SEQ_START_TOKEN) {
2590 rc = neigh_get_first(seq);
2591 goto out;
2592 }
2593
2594 state = seq->private;
2595 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2596 rc = neigh_get_next(seq, v, NULL);
2597 if (rc)
2598 goto out;
2599 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2600 rc = pneigh_get_first(seq);
2601 } else {
2602 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2603 rc = pneigh_get_next(seq, v, NULL);
2604 }
2605out:
2606 ++(*pos);
2607 return rc;
2608}
2609EXPORT_SYMBOL(neigh_seq_next);
2610
2611void neigh_seq_stop(struct seq_file *seq, void *v)
2612 __releases(rcu_bh)
2613{
2614 rcu_read_unlock_bh();
2615}
2616EXPORT_SYMBOL(neigh_seq_stop);
2617
2618/* statistics via seq_file */
2619
2620static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2621{
2622 struct neigh_table *tbl = seq->private;
2623 int cpu;
2624
2625 if (*pos == 0)
2626 return SEQ_START_TOKEN;
2627
2628 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2629 if (!cpu_possible(cpu))
2630 continue;
2631 *pos = cpu+1;
2632 return per_cpu_ptr(tbl->stats, cpu);
2633 }
2634 return NULL;
2635}
2636
2637static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2638{
2639 struct neigh_table *tbl = seq->private;
2640 int cpu;
2641
2642 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2643 if (!cpu_possible(cpu))
2644 continue;
2645 *pos = cpu+1;
2646 return per_cpu_ptr(tbl->stats, cpu);
2647 }
2648 return NULL;
2649}
2650
2651static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2652{
2653
2654}
2655
2656static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2657{
2658 struct neigh_table *tbl = seq->private;
2659 struct neigh_statistics *st = v;
2660
2661 if (v == SEQ_START_TOKEN) {
2662 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2663 return 0;
2664 }
2665
2666 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2667 "%08lx %08lx %08lx %08lx %08lx\n",
2668 atomic_read(&tbl->entries),
2669
2670 st->allocs,
2671 st->destroys,
2672 st->hash_grows,
2673
2674 st->lookups,
2675 st->hits,
2676
2677 st->res_failed,
2678
2679 st->rcv_probes_mcast,
2680 st->rcv_probes_ucast,
2681
2682 st->periodic_gc_runs,
2683 st->forced_gc_runs,
2684 st->unres_discards
2685 );
2686
2687 return 0;
2688}
2689
2690static const struct seq_operations neigh_stat_seq_ops = {
2691 .start = neigh_stat_seq_start,
2692 .next = neigh_stat_seq_next,
2693 .stop = neigh_stat_seq_stop,
2694 .show = neigh_stat_seq_show,
2695};
2696
2697static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2698{
2699 int ret = seq_open(file, &neigh_stat_seq_ops);
2700
2701 if (!ret) {
2702 struct seq_file *sf = file->private_data;
2703 sf->private = PDE(inode)->data;
2704 }
2705 return ret;
2706}
2707
2708static const struct file_operations neigh_stat_seq_fops = {
2709 .owner = THIS_MODULE,
2710 .open = neigh_stat_seq_open,
2711 .read = seq_read,
2712 .llseek = seq_lseek,
2713 .release = seq_release,
2714};
2715
2716#endif /* CONFIG_PROC_FS */
2717
2718static inline size_t neigh_nlmsg_size(void)
2719{
2720 return NLMSG_ALIGN(sizeof(struct ndmsg))
2721 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2722 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2723 + nla_total_size(sizeof(struct nda_cacheinfo))
2724 + nla_total_size(4); /* NDA_PROBES */
2725}
2726
2727static void __neigh_notify(struct neighbour *n, int type, int flags)
2728{
2729 struct net *net = dev_net(n->dev);
2730 struct sk_buff *skb;
2731 int err = -ENOBUFS;
2732
2733 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2734 if (skb == NULL)
2735 goto errout;
2736
2737 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2738 if (err < 0) {
2739 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2740 WARN_ON(err == -EMSGSIZE);
2741 kfree_skb(skb);
2742 goto errout;
2743 }
2744 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2745 return;
2746errout:
2747 if (err < 0)
2748 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2749}
2750
2751#ifdef CONFIG_ARPD
2752void neigh_app_ns(struct neighbour *n)
2753{
2754 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2755}
2756EXPORT_SYMBOL(neigh_app_ns);
2757#endif /* CONFIG_ARPD */
2758
2759#ifdef CONFIG_SYSCTL
2760
2761static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2762 size_t *lenp, loff_t *ppos)
2763{
2764 int size, ret;
2765 ctl_table tmp = *ctl;
2766
2767 tmp.data = &size;
2768 size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
2769 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
2770 if (write && !ret)
2771 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2772 return ret;
2773}
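
/* A rough worked example of the conversion above: assuming
 * SKB_TRUESIZE(ETH_FRAME_LEN) comes to about 2 KB (the exact value
 * depends on the sizes of struct sk_buff and struct skb_shared_info),
 * a queue_len_bytes default of 64 KB reads back through the legacy
 * "unres_qlen" file as roughly 31 packets rather than the historical
 * 3.
 */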
2774
2775enum {
2776 NEIGH_VAR_MCAST_PROBE,
2777 NEIGH_VAR_UCAST_PROBE,
2778 NEIGH_VAR_APP_PROBE,
2779 NEIGH_VAR_RETRANS_TIME,
2780 NEIGH_VAR_BASE_REACHABLE_TIME,
2781 NEIGH_VAR_DELAY_PROBE_TIME,
2782 NEIGH_VAR_GC_STALETIME,
2783 NEIGH_VAR_QUEUE_LEN,
2784 NEIGH_VAR_QUEUE_LEN_BYTES,
2785 NEIGH_VAR_PROXY_QLEN,
2786 NEIGH_VAR_ANYCAST_DELAY,
2787 NEIGH_VAR_PROXY_DELAY,
2788 NEIGH_VAR_LOCKTIME,
2789 NEIGH_VAR_RETRANS_TIME_MS,
2790 NEIGH_VAR_BASE_REACHABLE_TIME_MS,
2791 NEIGH_VAR_GC_INTERVAL,
2792 NEIGH_VAR_GC_THRESH1,
2793 NEIGH_VAR_GC_THRESH2,
2794 NEIGH_VAR_GC_THRESH3,
2795 NEIGH_VAR_MAX
2796};
2797
2798static struct neigh_sysctl_table {
2799 struct ctl_table_header *sysctl_header;
2800 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2801} neigh_sysctl_template __read_mostly = {
2802 .neigh_vars = {
2803 [NEIGH_VAR_MCAST_PROBE] = {
2804 .procname = "mcast_solicit",
2805 .maxlen = sizeof(int),
2806 .mode = 0644,
2807 .proc_handler = proc_dointvec,
2808 },
2809 [NEIGH_VAR_UCAST_PROBE] = {
2810 .procname = "ucast_solicit",
2811 .maxlen = sizeof(int),
2812 .mode = 0644,
2813 .proc_handler = proc_dointvec,
2814 },
2815 [NEIGH_VAR_APP_PROBE] = {
2816 .procname = "app_solicit",
2817 .maxlen = sizeof(int),
2818 .mode = 0644,
2819 .proc_handler = proc_dointvec,
2820 },
2821 [NEIGH_VAR_RETRANS_TIME] = {
2822 .procname = "retrans_time",
2823 .maxlen = sizeof(int),
2824 .mode = 0644,
2825 .proc_handler = proc_dointvec_userhz_jiffies,
2826 },
2827 [NEIGH_VAR_BASE_REACHABLE_TIME] = {
2828 .procname = "base_reachable_time",
2829 .maxlen = sizeof(int),
2830 .mode = 0644,
2831 .proc_handler = proc_dointvec_jiffies,
2832 },
2833 [NEIGH_VAR_DELAY_PROBE_TIME] = {
2834 .procname = "delay_first_probe_time",
2835 .maxlen = sizeof(int),
2836 .mode = 0644,
2837 .proc_handler = proc_dointvec_jiffies,
2838 },
2839 [NEIGH_VAR_GC_STALETIME] = {
2840 .procname = "gc_stale_time",
2841 .maxlen = sizeof(int),
2842 .mode = 0644,
2843 .proc_handler = proc_dointvec_jiffies,
2844 },
2845 [NEIGH_VAR_QUEUE_LEN] = {
2846 .procname = "unres_qlen",
2847 .maxlen = sizeof(int),
2848 .mode = 0644,
2849 .proc_handler = proc_unres_qlen,
2850 },
2851 [NEIGH_VAR_QUEUE_LEN_BYTES] = {
2852 .procname = "unres_qlen_bytes",
2853 .maxlen = sizeof(int),
2854 .mode = 0644,
2855 .proc_handler = proc_dointvec,
2856 },
2857 [NEIGH_VAR_PROXY_QLEN] = {
2858 .procname = "proxy_qlen",
2859 .maxlen = sizeof(int),
2860 .mode = 0644,
2861 .proc_handler = proc_dointvec,
2862 },
2863 [NEIGH_VAR_ANYCAST_DELAY] = {
2864 .procname = "anycast_delay",
2865 .maxlen = sizeof(int),
2866 .mode = 0644,
2867 .proc_handler = proc_dointvec_userhz_jiffies,
2868 },
2869 [NEIGH_VAR_PROXY_DELAY] = {
2870 .procname = "proxy_delay",
2871 .maxlen = sizeof(int),
2872 .mode = 0644,
2873 .proc_handler = proc_dointvec_userhz_jiffies,
2874 },
2875 [NEIGH_VAR_LOCKTIME] = {
2876 .procname = "locktime",
2877 .maxlen = sizeof(int),
2878 .mode = 0644,
2879 .proc_handler = proc_dointvec_userhz_jiffies,
2880 },
2881 [NEIGH_VAR_RETRANS_TIME_MS] = {
2882 .procname = "retrans_time_ms",
2883 .maxlen = sizeof(int),
2884 .mode = 0644,
2885 .proc_handler = proc_dointvec_ms_jiffies,
2886 },
2887 [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
2888 .procname = "base_reachable_time_ms",
2889 .maxlen = sizeof(int),
2890 .mode = 0644,
2891 .proc_handler = proc_dointvec_ms_jiffies,
2892 },
2893 [NEIGH_VAR_GC_INTERVAL] = {
2894 .procname = "gc_interval",
2895 .maxlen = sizeof(int),
2896 .mode = 0644,
2897 .proc_handler = proc_dointvec_jiffies,
2898 },
2899 [NEIGH_VAR_GC_THRESH1] = {
2900 .procname = "gc_thresh1",
2901 .maxlen = sizeof(int),
2902 .mode = 0644,
2903 .proc_handler = proc_dointvec,
2904 },
2905 [NEIGH_VAR_GC_THRESH2] = {
2906 .procname = "gc_thresh2",
2907 .maxlen = sizeof(int),
2908 .mode = 0644,
2909 .proc_handler = proc_dointvec,
2910 },
2911 [NEIGH_VAR_GC_THRESH3] = {
2912 .procname = "gc_thresh3",
2913 .maxlen = sizeof(int),
2914 .mode = 0644,
2915 .proc_handler = proc_dointvec,
2916 },
2917 {},
2918 },
2919};
2920
2921int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2922 char *p_name, proc_handler *handler)
2923{
2924 struct neigh_sysctl_table *t;
2925 const char *dev_name_source = NULL;
2926 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2927
2928 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2929 if (!t)
2930 goto err;
2931
2932 t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
2933 t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
2934 t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
2935 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
2936 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
2937 t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
2938 t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
2939 t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
2940 t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
2941 t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
2942 t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
2943 t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
2944 t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
2945 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
2946 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
2947
2948 if (dev) {
2949 dev_name_source = dev->name;
2950 /* Terminate the table early */
2951 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2952 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2953 } else {
2954 dev_name_source = "default";
2955 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2956 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2957 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
2958 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
2959 }
2960
2962 if (handler) {
2963 /* RetransTime */
2964 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
2965 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
2966 /* ReachableTime */
2967 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
2968 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
2969 /* RetransTime (in milliseconds)*/
2970 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
2971 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
2972 /* ReachableTime (in milliseconds) */
2973 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
2974 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2975 }
2976
2977 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2978 p_name, dev_name_source);
2979 t->sysctl_header =
2980 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
2981 if (!t->sysctl_header)
2982 goto free;
2983
2984 p->sysctl_table = t;
2985 return 0;
2986
2987free:
2988 kfree(t);
2989err:
2990 return -ENOBUFS;
2991}
2992EXPORT_SYMBOL(neigh_sysctl_register);
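
/* Illustrative registration, assuming the usual IPv4 usage: ARP
 * registers its per-device knobs with something like
 *
 *	neigh_sysctl_register(dev, parms, "ipv4", NULL);
 *
 * creating net/ipv4/neigh/<ifname>/..., while passing dev == NULL
 * yields the net/<proto>/neigh/default/ tree, including the gc_*
 * entries that are stripped out for real devices above.
 */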
2993
2994void neigh_sysctl_unregister(struct neigh_parms *p)
2995{
2996 if (p->sysctl_table) {
2997 struct neigh_sysctl_table *t = p->sysctl_table;
2998 p->sysctl_table = NULL;
2999 unregister_net_sysctl_table(t->sysctl_header);
3000 kfree(t);
3001 }
3002}
3003EXPORT_SYMBOL(neigh_sysctl_unregister);
3004
3005#endif /* CONFIG_SYSCTL */
3006
3007static int __init neigh_init(void)
3008{
3009 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3010 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3011 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3012
3013 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3014 NULL);
3015 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3016
3017 return 0;
3018}
3019
3020subsys_initcall(neigh_init);
3021