1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/socket.h>
23#include <linux/netdevice.h>
24#include <linux/proc_fs.h>
25#ifdef CONFIG_SYSCTL
26#include <linux/sysctl.h>
27#endif
28#include <linux/times.h>
29#include <net/net_namespace.h>
30#include <net/neighbour.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/netevent.h>
34#include <net/netlink.h>
35#include <linux/rtnetlink.h>
36#include <linux/random.h>
37#include <linux/string.h>
38#include <linux/log2.h>
39
40#define NEIGH_DEBUG 1
41
42#define NEIGH_PRINTK(x...) printk(x)
43#define NEIGH_NOPRINTK(x...) do { ; } while(0)
44#define NEIGH_PRINTK1 NEIGH_NOPRINTK
45#define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47#if NEIGH_DEBUG >= 1
48#undef NEIGH_PRINTK1
49#define NEIGH_PRINTK1 NEIGH_PRINTK
50#endif
51#if NEIGH_DEBUG >= 2
52#undef NEIGH_PRINTK2
53#define NEIGH_PRINTK2 NEIGH_PRINTK
54#endif
55
56#define PNEIGH_HASHMASK 0xF
57
58static void neigh_timer_handler(unsigned long arg);
59static void __neigh_notify(struct neighbour *n, int type, int flags);
60static void neigh_update_notify(struct neighbour *neigh);
61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62
63static struct neigh_table *neigh_tables;
64#ifdef CONFIG_PROC_FS
65static const struct file_operations neigh_stat_seq_fops;
66#endif
67
68/*
69 Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71 - All the scans/updates to hash buckets MUST be made under this lock.
 72 - NOTHING clever should be done under this lock: no callbacks
 73 to protocol backends, no attempts to send anything to the network.
 74 Doing so will result in deadlocks if the backend/driver wants to use
 75 the neighbour cache.
 76 - If the entry requires some non-trivial actions, increase
 77 its reference count and release the table lock.
78
79 Neighbour entries are protected:
80 - with reference count.
81 - with rwlock neigh->lock
82
83 Reference count prevents destruction.
84
85 neigh->lock mainly serializes ll address data and its validity state.
 86 However, the same lock is used to protect other entry fields:
87 - timer
88 - resolution queue
89
 90 Again, nothing clever shall be done under neigh->lock;
 91 the most complicated procedure we allow is dev->hard_header.
 92 It is assumed that dev->hard_header is simple and does
 93 not make callbacks to neighbour tables.
94
 95 The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
 96 the list of neighbour tables. This list is used only in process context.
97 */
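/*
 * Illustration (editor's sketch, not part of the original code): the rule
 * above means that any non-trivial work on an entry found during a table
 * walk should be done only after the table lock has been dropped, e.g.:
 *
 *	write_lock_bh(&tbl->lock);
 *	... locate struct neighbour *n in a hash bucket ...
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	... talk to the driver / send packets using n ...
 *	neigh_release(n);
 */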
98
99static DEFINE_RWLOCK(neigh_tbl_lock);
100
101static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
102{
103 kfree_skb(skb);
104 return -ENETDOWN;
105}
106
107static void neigh_cleanup_and_release(struct neighbour *neigh)
108{
109 if (neigh->parms->neigh_cleanup)
110 neigh->parms->neigh_cleanup(neigh);
111
112 __neigh_notify(neigh, RTM_DELNEIGH, 0);
113 neigh_release(neigh);
114}
115
116/*
117 * It returns a value randomly distributed in the interval
118 * (1/2)*base...(3/2)*base. It corresponds to the default IPv6 settings
119 * and is not overridable, because it is a really reasonable choice.
120 */
121
122unsigned long neigh_rand_reach_time(unsigned long base)
123{
124 return base ? (net_random() % base) + (base >> 1) : 0;
125}
126EXPORT_SYMBOL(neigh_rand_reach_time);
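/*
 * Worked example (editor's note): with base = 30 * HZ, net_random() % base
 * yields a value in [0, 30 * HZ) and adding base >> 1 shifts it into
 * [15 * HZ, 45 * HZ), i.e. the (1/2)*base ... (3/2)*base interval described
 * above.
 */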
127
128
129static int neigh_forced_gc(struct neigh_table *tbl)
130{
131 int shrunk = 0;
132 int i;
133 struct neigh_hash_table *nht;
134
135 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136
137 write_lock_bh(&tbl->lock);
138 nht = rcu_dereference_protected(tbl->nht,
139 lockdep_is_held(&tbl->lock));
140 for (i = 0; i < (1 << nht->hash_shift); i++) {
141 struct neighbour *n;
142 struct neighbour __rcu **np;
143
144 np = &nht->hash_buckets[i];
145 while ((n = rcu_dereference_protected(*np,
146 lockdep_is_held(&tbl->lock))) != NULL) {
147 /* Neighbour record may be discarded if:
148 * - nobody refers to it.
149 * - it is not permanent
150 */
151 write_lock(&n->lock);
152 if (atomic_read(&n->refcnt) == 1 &&
153 !(n->nud_state & NUD_PERMANENT)) {
154 rcu_assign_pointer(*np,
155 rcu_dereference_protected(n->next,
156 lockdep_is_held(&tbl->lock)));
157 n->dead = 1;
158 shrunk = 1;
159 write_unlock(&n->lock);
160 neigh_cleanup_and_release(n);
161 continue;
162 }
163 write_unlock(&n->lock);
164 np = &n->next;
165 }
166 }
167
168 tbl->last_flush = jiffies;
169
170 write_unlock_bh(&tbl->lock);
171
172 return shrunk;
173}
174
175static void neigh_add_timer(struct neighbour *n, unsigned long when)
176{
177 neigh_hold(n);
178 if (unlikely(mod_timer(&n->timer, when))) {
179 printk("NEIGH: BUG, double timer add, state is %x\n",
180 n->nud_state);
181 dump_stack();
182 }
183}
184
185static int neigh_del_timer(struct neighbour *n)
186{
187 if ((n->nud_state & NUD_IN_TIMER) &&
188 del_timer(&n->timer)) {
189 neigh_release(n);
190 return 1;
191 }
192 return 0;
193}
194
195static void pneigh_queue_purge(struct sk_buff_head *list)
196{
197 struct sk_buff *skb;
198
199 while ((skb = skb_dequeue(list)) != NULL) {
200 dev_put(skb->dev);
201 kfree_skb(skb);
202 }
203}
204
205static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
206{
207 int i;
208 struct neigh_hash_table *nht;
209
210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock));
212
213 for (i = 0; i < (1 << nht->hash_shift); i++) {
214 struct neighbour *n;
215 struct neighbour __rcu **np = &nht->hash_buckets[i];
216
217 while ((n = rcu_dereference_protected(*np,
218 lockdep_is_held(&tbl->lock))) != NULL) {
219 if (dev && n->dev != dev) {
220 np = &n->next;
221 continue;
222 }
223 rcu_assign_pointer(*np,
224 rcu_dereference_protected(n->next,
225 lockdep_is_held(&tbl->lock)));
226 write_lock(&n->lock);
227 neigh_del_timer(n);
228 n->dead = 1;
229
230 if (atomic_read(&n->refcnt) != 1) {
231 /* The most unpleasant situation:
232 we must destroy the neighbour entry,
233 but someone still uses it.
234
235 The destruction will be delayed until
236 the last user releases us, but
237 we must kill timers etc. and move
238 it to a safe state.
239 */
240 skb_queue_purge(&n->arp_queue);
241 n->output = neigh_blackhole;
242 if (n->nud_state & NUD_VALID)
243 n->nud_state = NUD_NOARP;
244 else
245 n->nud_state = NUD_NONE;
246 NEIGH_PRINTK2("neigh %p is stray.\n", n);
247 }
248 write_unlock(&n->lock);
249 neigh_cleanup_and_release(n);
250 }
251 }
252}
253
254void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
255{
256 write_lock_bh(&tbl->lock);
257 neigh_flush_dev(tbl, dev);
258 write_unlock_bh(&tbl->lock);
259}
260EXPORT_SYMBOL(neigh_changeaddr);
261
262int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
263{
264 write_lock_bh(&tbl->lock);
265 neigh_flush_dev(tbl, dev);
266 pneigh_ifdown(tbl, dev);
267 write_unlock_bh(&tbl->lock);
268
269 del_timer_sync(&tbl->proxy_timer);
270 pneigh_queue_purge(&tbl->proxy_queue);
271 return 0;
272}
273EXPORT_SYMBOL(neigh_ifdown);
274
275static struct neighbour *neigh_alloc(struct neigh_table *tbl)
276{
277 struct neighbour *n = NULL;
278 unsigned long now = jiffies;
279 int entries;
280
281 entries = atomic_inc_return(&tbl->entries) - 1;
282 if (entries >= tbl->gc_thresh3 ||
283 (entries >= tbl->gc_thresh2 &&
284 time_after(now, tbl->last_flush + 5 * HZ))) {
285 if (!neigh_forced_gc(tbl) &&
286 entries >= tbl->gc_thresh3)
287 goto out_entries;
288 }
289
290 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
291 if (!n)
292 goto out_entries;
293
294 skb_queue_head_init(&n->arp_queue);
295 rwlock_init(&n->lock);
296 seqlock_init(&n->ha_lock);
297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole;
300 seqlock_init(&n->hh.hh_lock);
301 n->parms = neigh_parms_clone(&tbl->parms);
302 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
303
304 NEIGH_CACHE_STAT_INC(tbl, allocs);
305 n->tbl = tbl;
306 atomic_set(&n->refcnt, 1);
307 n->dead = 1;
308out:
309 return n;
310
311out_entries:
312 atomic_dec(&tbl->entries);
313 goto out;
314}
315
316static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
317{
318 size_t size = (1 << shift) * sizeof(struct neighbour *);
319 struct neigh_hash_table *ret;
320 struct neighbour __rcu **buckets;
321
322 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
323 if (!ret)
324 return NULL;
325 if (size <= PAGE_SIZE)
326 buckets = kzalloc(size, GFP_ATOMIC);
327 else
328 buckets = (struct neighbour __rcu **)
329 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
330 get_order(size));
331 if (!buckets) {
332 kfree(ret);
333 return NULL;
334 }
335 ret->hash_buckets = buckets;
336 ret->hash_shift = shift;
337 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
338 ret->hash_rnd |= 1;
339 return ret;
340}
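/*
 * Sizing example (editor's note): the initial table created by
 * neigh_table_init_no_netlink() uses shift == 3, i.e. 8 buckets. On a
 * 64-bit build that is 8 * 8 = 64 bytes, well under PAGE_SIZE, so the
 * kzalloc() path above is taken; only much larger tables fall back to
 * __get_free_pages().
 */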
341
342static void neigh_hash_free_rcu(struct rcu_head *head)
343{
344 struct neigh_hash_table *nht = container_of(head,
345 struct neigh_hash_table,
346 rcu);
347 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
348 struct neighbour __rcu **buckets = nht->hash_buckets;
349
350 if (size <= PAGE_SIZE)
351 kfree(buckets);
352 else
353 free_pages((unsigned long)buckets, get_order(size));
354 kfree(nht);
355}
356
357static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
358 unsigned long new_shift)
359{
360 unsigned int i, hash;
361 struct neigh_hash_table *new_nht, *old_nht;
362
363 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
364
365 old_nht = rcu_dereference_protected(tbl->nht,
366 lockdep_is_held(&tbl->lock));
367 new_nht = neigh_hash_alloc(new_shift);
368 if (!new_nht)
369 return old_nht;
370
371 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
372 struct neighbour *n, *next;
373
374 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
375 lockdep_is_held(&tbl->lock));
376 n != NULL;
377 n = next) {
378 hash = tbl->hash(n->primary_key, n->dev,
379 new_nht->hash_rnd);
380
381 hash >>= (32 - new_nht->hash_shift);
382 next = rcu_dereference_protected(n->next,
383 lockdep_is_held(&tbl->lock));
384
385 rcu_assign_pointer(n->next,
386 rcu_dereference_protected(
387 new_nht->hash_buckets[hash],
388 lockdep_is_held(&tbl->lock)));
389 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
390 }
391 }
392
393 rcu_assign_pointer(tbl->nht, new_nht);
394 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
395 return new_nht;
396}
397
398struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
399 struct net_device *dev)
400{
401 struct neighbour *n;
402 int key_len = tbl->key_len;
403 u32 hash_val;
404 struct neigh_hash_table *nht;
405
406 NEIGH_CACHE_STAT_INC(tbl, lookups);
407
408 rcu_read_lock_bh();
409 nht = rcu_dereference_bh(tbl->nht);
410 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
411
412 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
413 n != NULL;
414 n = rcu_dereference_bh(n->next)) {
415 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
416 if (!atomic_inc_not_zero(&n->refcnt))
417 n = NULL;
418 NEIGH_CACHE_STAT_INC(tbl, hits);
419 break;
420 }
421 }
422
423 rcu_read_unlock_bh();
424 return n;
425}
426EXPORT_SYMBOL(neigh_lookup);
427
428struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
429 const void *pkey)
430{
431 struct neighbour *n;
432 int key_len = tbl->key_len;
433 u32 hash_val;
434 struct neigh_hash_table *nht;
435
436 NEIGH_CACHE_STAT_INC(tbl, lookups);
437
438 rcu_read_lock_bh();
439 nht = rcu_dereference_bh(tbl->nht);
440 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
441
442 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
443 n != NULL;
444 n = rcu_dereference_bh(n->next)) {
445 if (!memcmp(n->primary_key, pkey, key_len) &&
446 net_eq(dev_net(n->dev), net)) {
447 if (!atomic_inc_not_zero(&n->refcnt))
448 n = NULL;
449 NEIGH_CACHE_STAT_INC(tbl, hits);
450 break;
451 }
452 }
453
454 rcu_read_unlock_bh();
455 return n;
456}
457EXPORT_SYMBOL(neigh_lookup_nodev);
458
459struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
460 struct net_device *dev)
461{
462 u32 hash_val;
463 int key_len = tbl->key_len;
464 int error;
465 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
466 struct neigh_hash_table *nht;
467
468 if (!n) {
469 rc = ERR_PTR(-ENOBUFS);
470 goto out;
471 }
472
473 memcpy(n->primary_key, pkey, key_len);
474 n->dev = dev;
475 dev_hold(dev);
476
477 /* Protocol specific setup. */
478 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
479 rc = ERR_PTR(error);
480 goto out_neigh_release;
481 }
482
483 /* Device specific setup. */
484 if (n->parms->neigh_setup &&
485 (error = n->parms->neigh_setup(n)) < 0) {
486 rc = ERR_PTR(error);
487 goto out_neigh_release;
488 }
489
490 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
491
492 write_lock_bh(&tbl->lock);
493 nht = rcu_dereference_protected(tbl->nht,
494 lockdep_is_held(&tbl->lock));
495
496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498
499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500
501 if (n->parms->dead) {
502 rc = ERR_PTR(-EINVAL);
503 goto out_tbl_unlock;
504 }
505
506 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 lockdep_is_held(&tbl->lock));
508 n1 != NULL;
509 n1 = rcu_dereference_protected(n1->next,
510 lockdep_is_held(&tbl->lock))) {
511 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
512 neigh_hold(n1);
513 rc = n1;
514 goto out_tbl_unlock;
515 }
516 }
517
518 n->dead = 0;
519 neigh_hold(n);
520 rcu_assign_pointer(n->next,
521 rcu_dereference_protected(nht->hash_buckets[hash_val],
522 lockdep_is_held(&tbl->lock)));
523 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
524 write_unlock_bh(&tbl->lock);
525 NEIGH_PRINTK2("neigh %p is created.\n", n);
526 rc = n;
527out:
528 return rc;
529out_tbl_unlock:
530 write_unlock_bh(&tbl->lock);
531out_neigh_release:
532 neigh_release(n);
533 goto out;
534}
535EXPORT_SYMBOL(neigh_create);
536
537static u32 pneigh_hash(const void *pkey, int key_len)
538{
539 u32 hash_val = *(u32 *)(pkey + key_len - 4);
540 hash_val ^= (hash_val >> 16);
541 hash_val ^= hash_val >> 8;
542 hash_val ^= hash_val >> 4;
543 hash_val &= PNEIGH_HASHMASK;
544 return hash_val;
545}
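/*
 * Example (editor's note): for an IPv4 proxy entry (key_len == 4) the last
 * four bytes of the key are read as a u32 and XOR-folded down to four bits,
 * so every proxy entry lands in one of PNEIGH_HASHMASK + 1 == 16 buckets.
 */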
546
547static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
548 struct net *net,
549 const void *pkey,
550 int key_len,
551 struct net_device *dev)
552{
553 while (n) {
554 if (!memcmp(n->key, pkey, key_len) &&
555 net_eq(pneigh_net(n), net) &&
556 (n->dev == dev || !n->dev))
557 return n;
558 n = n->next;
559 }
560 return NULL;
561}
562
563struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
564 struct net *net, const void *pkey, struct net_device *dev)
565{
566 int key_len = tbl->key_len;
567 u32 hash_val = pneigh_hash(pkey, key_len);
568
569 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
570 net, pkey, key_len, dev);
571}
572EXPORT_SYMBOL_GPL(__pneigh_lookup);
573
574struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
575 struct net *net, const void *pkey,
576 struct net_device *dev, int creat)
577{
578 struct pneigh_entry *n;
579 int key_len = tbl->key_len;
580 u32 hash_val = pneigh_hash(pkey, key_len);
581
582 read_lock_bh(&tbl->lock);
583 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
584 net, pkey, key_len, dev);
585 read_unlock_bh(&tbl->lock);
586
587 if (n || !creat)
588 goto out;
589
590 ASSERT_RTNL();
591
592 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
593 if (!n)
594 goto out;
595
596 write_pnet(&n->net, hold_net(net));
597 memcpy(n->key, pkey, key_len);
598 n->dev = dev;
599 if (dev)
600 dev_hold(dev);
601
602 if (tbl->pconstructor && tbl->pconstructor(n)) {
603 if (dev)
604 dev_put(dev);
605 release_net(net);
606 kfree(n);
607 n = NULL;
608 goto out;
609 }
610
611 write_lock_bh(&tbl->lock);
612 n->next = tbl->phash_buckets[hash_val];
613 tbl->phash_buckets[hash_val] = n;
614 write_unlock_bh(&tbl->lock);
615out:
616 return n;
617}
618EXPORT_SYMBOL(pneigh_lookup);
619
620
621int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
622 struct net_device *dev)
623{
624 struct pneigh_entry *n, **np;
625 int key_len = tbl->key_len;
626 u32 hash_val = pneigh_hash(pkey, key_len);
627
628 write_lock_bh(&tbl->lock);
629 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
630 np = &n->next) {
631 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
632 net_eq(pneigh_net(n), net)) {
633 *np = n->next;
634 write_unlock_bh(&tbl->lock);
635 if (tbl->pdestructor)
636 tbl->pdestructor(n);
637 if (n->dev)
638 dev_put(n->dev);
639 release_net(pneigh_net(n));
640 kfree(n);
641 return 0;
642 }
643 }
644 write_unlock_bh(&tbl->lock);
645 return -ENOENT;
646}
647
648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
649{
650 struct pneigh_entry *n, **np;
651 u32 h;
652
653 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
654 np = &tbl->phash_buckets[h];
655 while ((n = *np) != NULL) {
656 if (!dev || n->dev == dev) {
657 *np = n->next;
658 if (tbl->pdestructor)
659 tbl->pdestructor(n);
660 if (n->dev)
661 dev_put(n->dev);
662 release_net(pneigh_net(n));
663 kfree(n);
664 continue;
665 }
666 np = &n->next;
667 }
668 }
669 return -ENOENT;
670}
671
672static void neigh_parms_destroy(struct neigh_parms *parms);
673
674static inline void neigh_parms_put(struct neigh_parms *parms)
675{
676 if (atomic_dec_and_test(&parms->refcnt))
677 neigh_parms_destroy(parms);
678}
679
680static void neigh_destroy_rcu(struct rcu_head *head)
681{
682 struct neighbour *neigh = container_of(head, struct neighbour, rcu);
683
684 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
685}
686/*
687 * neighbour must already be out of the table;
688 *
689 */
690void neigh_destroy(struct neighbour *neigh)
691{
692 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
693
694 if (!neigh->dead) {
695 printk(KERN_WARNING
696 "Destroying alive neighbour %p\n", neigh);
697 dump_stack();
698 return;
699 }
700
701 if (neigh_del_timer(neigh))
702 printk(KERN_WARNING "Impossible event.\n");
703
704 skb_queue_purge(&neigh->arp_queue);
705
706 dev_put(neigh->dev);
707 neigh_parms_put(neigh->parms);
708
709 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
710
711 atomic_dec(&neigh->tbl->entries);
712 call_rcu(&neigh->rcu, neigh_destroy_rcu);
713}
714EXPORT_SYMBOL(neigh_destroy);
715
716/* Neighbour state is suspicious;
717 disable fast path.
718
719 Called with write_locked neigh.
720 */
721static void neigh_suspect(struct neighbour *neigh)
722{
723 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
724
725 neigh->output = neigh->ops->output;
726}
727
728/* Neighbour state is OK;
729 enable fast path.
730
731 Called with write_locked neigh.
732 */
733static void neigh_connect(struct neighbour *neigh)
734{
735 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
736
737 neigh->output = neigh->ops->connected_output;
738}
739
740static void neigh_periodic_work(struct work_struct *work)
741{
742 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
743 struct neighbour *n;
744 struct neighbour __rcu **np;
745 unsigned int i;
746 struct neigh_hash_table *nht;
747
748 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
749
750 write_lock_bh(&tbl->lock);
751 nht = rcu_dereference_protected(tbl->nht,
752 lockdep_is_held(&tbl->lock));
753
754 /*
755 * periodically recompute ReachableTime from random function
756 */
757
758 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
759 struct neigh_parms *p;
760 tbl->last_rand = jiffies;
761 for (p = &tbl->parms; p; p = p->next)
762 p->reachable_time =
763 neigh_rand_reach_time(p->base_reachable_time);
764 }
765
766 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
767 np = &nht->hash_buckets[i];
768
769 while ((n = rcu_dereference_protected(*np,
770 lockdep_is_held(&tbl->lock))) != NULL) {
771 unsigned int state;
772
773 write_lock(&n->lock);
774
775 state = n->nud_state;
776 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
777 write_unlock(&n->lock);
778 goto next_elt;
779 }
780
781 if (time_before(n->used, n->confirmed))
782 n->used = n->confirmed;
783
784 if (atomic_read(&n->refcnt) == 1 &&
785 (state == NUD_FAILED ||
786 time_after(jiffies, n->used + n->parms->gc_staletime))) {
787 *np = n->next;
788 n->dead = 1;
789 write_unlock(&n->lock);
790 neigh_cleanup_and_release(n);
791 continue;
792 }
793 write_unlock(&n->lock);
794
795next_elt:
796 np = &n->next;
797 }
798 /*
799 * It's fine to release lock here, even if hash table
800 * grows while we are preempted.
801 */
802 write_unlock_bh(&tbl->lock);
803 cond_resched();
804 write_lock_bh(&tbl->lock);
805 }
806 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
807 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
808 * base_reachable_time.
809 */
810 schedule_delayed_work(&tbl->gc_work,
811 tbl->parms.base_reachable_time >> 1);
812 write_unlock_bh(&tbl->lock);
813}
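/*
 * Worked example (editor's note): with base_reachable_time = 30 * HZ the
 * work above reschedules itself roughly every 15 seconds, and because
 * reachable_time is redrawn via neigh_rand_reach_time(), individual entries
 * age out of NUD_REACHABLE between 15 and 45 seconds after confirmation.
 */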
814
815static __inline__ int neigh_max_probes(struct neighbour *n)
816{
817 struct neigh_parms *p = n->parms;
818 return (n->nud_state & NUD_PROBE) ?
819 p->ucast_probes :
820 p->ucast_probes + p->app_probes + p->mcast_probes;
821}
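/*
 * Worked example (editor's note): with the usual defaults of
 * ucast_probes = 3, app_probes = 0 and mcast_probes = 3, an entry in
 * NUD_PROBE gives up after 3 unicast probes, while one still in
 * NUD_INCOMPLETE may send up to 3 + 0 + 3 = 6 probes before being failed.
 */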
822
823static void neigh_invalidate(struct neighbour *neigh)
824 __releases(neigh->lock)
825 __acquires(neigh->lock)
826{
827 struct sk_buff *skb;
828
829 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
830 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
831 neigh->updated = jiffies;
832
833 /* This is a very delicate place. report_unreachable is a very
834 complicated routine. In particular, it can hit the same neighbour entry!
835
836 So we try to be careful and avoid a dead loop. --ANK
837 */
838 while (neigh->nud_state == NUD_FAILED &&
839 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
840 write_unlock(&neigh->lock);
841 neigh->ops->error_report(neigh, skb);
842 write_lock(&neigh->lock);
843 }
844 skb_queue_purge(&neigh->arp_queue);
845}
846
847/* Called when a timer expires for a neighbour entry. */
848
849static void neigh_timer_handler(unsigned long arg)
850{
851 unsigned long now, next;
852 struct neighbour *neigh = (struct neighbour *)arg;
853 unsigned state;
854 int notify = 0;
855
856 write_lock(&neigh->lock);
857
858 state = neigh->nud_state;
859 now = jiffies;
860 next = now + HZ;
861
862 if (!(state & NUD_IN_TIMER)) {
863#ifndef CONFIG_SMP
864 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
865#endif
866 goto out;
867 }
868
869 if (state & NUD_REACHABLE) {
870 if (time_before_eq(now,
871 neigh->confirmed + neigh->parms->reachable_time)) {
872 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
873 next = neigh->confirmed + neigh->parms->reachable_time;
874 } else if (time_before_eq(now,
875 neigh->used + neigh->parms->delay_probe_time)) {
876 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
877 neigh->nud_state = NUD_DELAY;
878 neigh->updated = jiffies;
879 neigh_suspect(neigh);
880 next = now + neigh->parms->delay_probe_time;
881 } else {
882 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
883 neigh->nud_state = NUD_STALE;
884 neigh->updated = jiffies;
885 neigh_suspect(neigh);
886 notify = 1;
887 }
888 } else if (state & NUD_DELAY) {
889 if (time_before_eq(now,
890 neigh->confirmed + neigh->parms->delay_probe_time)) {
891 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
892 neigh->nud_state = NUD_REACHABLE;
893 neigh->updated = jiffies;
894 neigh_connect(neigh);
895 notify = 1;
896 next = neigh->confirmed + neigh->parms->reachable_time;
897 } else {
898 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
899 neigh->nud_state = NUD_PROBE;
900 neigh->updated = jiffies;
901 atomic_set(&neigh->probes, 0);
902 next = now + neigh->parms->retrans_time;
903 }
904 } else {
905 /* NUD_PROBE|NUD_INCOMPLETE */
906 next = now + neigh->parms->retrans_time;
907 }
908
909 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
910 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
911 neigh->nud_state = NUD_FAILED;
912 notify = 1;
913 neigh_invalidate(neigh);
914 }
915
916 if (neigh->nud_state & NUD_IN_TIMER) {
917 if (time_before(next, jiffies + HZ/2))
918 next = jiffies + HZ/2;
919 if (!mod_timer(&neigh->timer, next))
920 neigh_hold(neigh);
921 }
922 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
923 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
924 /* keep skb alive even if arp_queue overflows */
925 if (skb)
926 skb = skb_copy(skb, GFP_ATOMIC);
927 write_unlock(&neigh->lock);
928 neigh->ops->solicit(neigh, skb);
929 atomic_inc(&neigh->probes);
930 kfree_skb(skb);
931 } else {
932out:
933 write_unlock(&neigh->lock);
934 }
935
936 if (notify)
937 neigh_update_notify(neigh);
938
939 neigh_release(neigh);
940}
941
942int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
943{
944 int rc;
945 unsigned long now;
946
947 write_lock_bh(&neigh->lock);
948
949 rc = 0;
950 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
951 goto out_unlock_bh;
952
953 now = jiffies;
954
955 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
956 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
957 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
958 neigh->nud_state = NUD_INCOMPLETE;
959 neigh->updated = jiffies;
960 neigh_add_timer(neigh, now + 1);
961 } else {
962 neigh->nud_state = NUD_FAILED;
963 neigh->updated = jiffies;
964 write_unlock_bh(&neigh->lock);
965
966 kfree_skb(skb);
967 return 1;
968 }
969 } else if (neigh->nud_state & NUD_STALE) {
970 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
971 neigh->nud_state = NUD_DELAY;
972 neigh->updated = jiffies;
973 neigh_add_timer(neigh,
974 jiffies + neigh->parms->delay_probe_time);
975 }
976
977 if (neigh->nud_state == NUD_INCOMPLETE) {
978 if (skb) {
979 if (skb_queue_len(&neigh->arp_queue) >=
980 neigh->parms->queue_len) {
981 struct sk_buff *buff;
982 buff = __skb_dequeue(&neigh->arp_queue);
983 kfree_skb(buff);
984 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
985 }
986 skb_dst_force(skb);
987 __skb_queue_tail(&neigh->arp_queue, skb);
988 }
989 rc = 1;
990 }
991out_unlock_bh:
992 write_unlock_bh(&neigh->lock);
993 return rc;
994}
995EXPORT_SYMBOL(__neigh_event_send);
996
997static void neigh_update_hhs(struct neighbour *neigh)
998{
999 struct hh_cache *hh;
1000 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1001 = NULL;
1002
1003 if (neigh->dev->header_ops)
1004 update = neigh->dev->header_ops->cache_update;
1005
1006 if (update) {
1007 hh = &neigh->hh;
1008 if (hh->hh_len) {
1009 write_seqlock_bh(&hh->hh_lock);
1010 update(hh, neigh->dev, neigh->ha);
1011 write_sequnlock_bh(&hh->hh_lock);
1012 }
1013 }
1014}
1015
1016
1017
1018/* Generic update routine.
1019 -- lladdr is the new lladdr, or NULL if it is not supplied.
1020 -- new is the new state.
1021 -- flags
1022 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1023 if it is different.
1024 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1025 lladdr instead of overriding it
1026 if it is different.
1027 It also allows the current state to be retained
1028 if the lladdr is unchanged.
1029 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1030
1031 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1032 NTF_ROUTER flag.
1033 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
1034 a router.
1035
1036 Caller MUST hold a reference count on the entry.
1037 */
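/*
 * Usage sketch (editor's note, hypothetical caller): the netlink handler
 * below ends up calling this roughly as
 *
 *	neigh_update(neigh, lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 *
 * when an administrator installs a static entry; NEIGH_UPDATE_F_ADMIN is
 * what bypasses the NUD_NOARP/NUD_PERMANENT protection checked below.
 */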
1038
1039int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1040 u32 flags)
1041{
1042 u8 old;
1043 int err;
1044 int notify = 0;
1045 struct net_device *dev;
1046 int update_isrouter = 0;
1047
1048 write_lock_bh(&neigh->lock);
1049
1050 dev = neigh->dev;
1051 old = neigh->nud_state;
1052 err = -EPERM;
1053
1054 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1055 (old & (NUD_NOARP | NUD_PERMANENT)))
1056 goto out;
1057
1058 if (!(new & NUD_VALID)) {
1059 neigh_del_timer(neigh);
1060 if (old & NUD_CONNECTED)
1061 neigh_suspect(neigh);
1062 neigh->nud_state = new;
1063 err = 0;
1064 notify = old & NUD_VALID;
1065 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1066 (new & NUD_FAILED)) {
1067 neigh_invalidate(neigh);
1068 notify = 1;
1069 }
1070 goto out;
1071 }
1072
1073 /* Compare new lladdr with cached one */
1074 if (!dev->addr_len) {
1075 /* First case: device needs no address. */
1076 lladdr = neigh->ha;
1077 } else if (lladdr) {
1078 /* The second case: if something is already cached
1079 and a new address is proposed:
1080 - compare new & old
1081 - if they are different, check override flag
1082 */
1083 if ((old & NUD_VALID) &&
1084 !memcmp(lladdr, neigh->ha, dev->addr_len))
1085 lladdr = neigh->ha;
1086 } else {
1087 /* No address is supplied; if we know something,
1088 use it, otherwise discard the request.
1089 */
1090 err = -EINVAL;
1091 if (!(old & NUD_VALID))
1092 goto out;
1093 lladdr = neigh->ha;
1094 }
1095
1096 if (new & NUD_CONNECTED)
1097 neigh->confirmed = jiffies;
1098 neigh->updated = jiffies;
1099
1100 /* If the entry was valid and the address has not changed,
1101 do not change the entry state if the new one is STALE.
1102 */
1103 err = 0;
1104 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1105 if (old & NUD_VALID) {
1106 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1107 update_isrouter = 0;
1108 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1109 (old & NUD_CONNECTED)) {
1110 lladdr = neigh->ha;
1111 new = NUD_STALE;
1112 } else
1113 goto out;
1114 } else {
1115 if (lladdr == neigh->ha && new == NUD_STALE &&
1116 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1117 (old & NUD_CONNECTED))
1118 )
1119 new = old;
1120 }
1121 }
1122
1123 if (new != old) {
1124 neigh_del_timer(neigh);
1125 if (new & NUD_IN_TIMER)
1126 neigh_add_timer(neigh, (jiffies +
1127 ((new & NUD_REACHABLE) ?
1128 neigh->parms->reachable_time :
1129 0)));
1130 neigh->nud_state = new;
1131 }
1132
1133 if (lladdr != neigh->ha) {
1134 write_seqlock(&neigh->ha_lock);
1135 memcpy(&neigh->ha, lladdr, dev->addr_len);
1136 write_sequnlock(&neigh->ha_lock);
1137 neigh_update_hhs(neigh);
1138 if (!(new & NUD_CONNECTED))
1139 neigh->confirmed = jiffies -
1140 (neigh->parms->base_reachable_time << 1);
1141 notify = 1;
1142 }
1143 if (new == old)
1144 goto out;
1145 if (new & NUD_CONNECTED)
1146 neigh_connect(neigh);
1147 else
1148 neigh_suspect(neigh);
1149 if (!(old & NUD_VALID)) {
1150 struct sk_buff *skb;
1151
1152 /* Again: avoid dead loop if something went wrong */
1153
1154 while (neigh->nud_state & NUD_VALID &&
1155 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1156 struct dst_entry *dst = skb_dst(skb);
1157 struct neighbour *n2, *n1 = neigh;
1158 write_unlock_bh(&neigh->lock);
1159 /* On shaper/eql skb->dst->neighbour != neigh :( */
1160 if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1161 n1 = n2;
1162 n1->output(n1, skb);
1163 write_lock_bh(&neigh->lock);
1164 }
1165 skb_queue_purge(&neigh->arp_queue);
1166 }
1167out:
1168 if (update_isrouter) {
1169 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1170 (neigh->flags | NTF_ROUTER) :
1171 (neigh->flags & ~NTF_ROUTER);
1172 }
1173 write_unlock_bh(&neigh->lock);
1174
1175 if (notify)
1176 neigh_update_notify(neigh);
1177
1178 return err;
1179}
1180EXPORT_SYMBOL(neigh_update);
1181
1182struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1183 u8 *lladdr, void *saddr,
1184 struct net_device *dev)
1185{
1186 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1187 lladdr || !dev->addr_len);
1188 if (neigh)
1189 neigh_update(neigh, lladdr, NUD_STALE,
1190 NEIGH_UPDATE_F_OVERRIDE);
1191 return neigh;
1192}
1193EXPORT_SYMBOL(neigh_event_ns);
1194
1195/* called with read_lock_bh(&n->lock); */
1196static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1197{
1198 struct net_device *dev = dst->dev;
1199 __be16 prot = dst->ops->protocol;
1200 struct hh_cache *hh = &n->hh;
1201
1202 write_lock_bh(&n->lock);
1203
1204 /* Only one thread can come in here and initialize the
1205 * hh_cache entry.
1206 */
1207 if (!hh->hh_len)
1208 dev->header_ops->cache(n, hh, prot);
1209
1210 write_unlock_bh(&n->lock);
1211}
1212
1213/* This function can be used in contexts where only the old dev_queue_xmit
1214 * worked, e.g. if you want to override the normal output path (eql, shaper),
1215 * but resolution has not been made yet.
1216 */
1217
1218int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1219{
1220 struct net_device *dev = skb->dev;
1221
1222 __skb_pull(skb, skb_network_offset(skb));
1223
1224 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1225 skb->len) < 0 &&
1226 dev->header_ops->rebuild(skb))
1227 return 0;
1228
1229 return dev_queue_xmit(skb);
1230}
1231EXPORT_SYMBOL(neigh_compat_output);
1232
1233/* Slow and careful. */
1234
1235int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1236{
1237 struct dst_entry *dst = skb_dst(skb);
1238 int rc = 0;
1239
1240 if (!dst)
1241 goto discard;
1242
1243 __skb_pull(skb, skb_network_offset(skb));
1244
1245 if (!neigh_event_send(neigh, skb)) {
1246 int err;
1247 struct net_device *dev = neigh->dev;
1248 unsigned int seq;
1249
1250 if (dev->header_ops->cache && !neigh->hh.hh_len)
1251 neigh_hh_init(neigh, dst);
1252
1253 do {
1254 seq = read_seqbegin(&neigh->ha_lock);
1255 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1256 neigh->ha, NULL, skb->len);
1257 } while (read_seqretry(&neigh->ha_lock, seq));
1258
1259 if (err >= 0)
1260 rc = dev_queue_xmit(skb);
1261 else
1262 goto out_kfree_skb;
1263 }
1264out:
1265 return rc;
1266discard:
1267 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1268 dst, neigh);
1269out_kfree_skb:
1270 rc = -EINVAL;
1271 kfree_skb(skb);
1272 goto out;
1273}
1274EXPORT_SYMBOL(neigh_resolve_output);
1275
1276/* As fast as possible without hh cache */
1277
1278int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1279{
1280 struct net_device *dev = neigh->dev;
1281 unsigned int seq;
1282 int err;
1283
1284 __skb_pull(skb, skb_network_offset(skb));
1285
1286 do {
1287 seq = read_seqbegin(&neigh->ha_lock);
1288 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1289 neigh->ha, NULL, skb->len);
1290 } while (read_seqretry(&neigh->ha_lock, seq));
1291
1292 if (err >= 0)
1293 err = dev_queue_xmit(skb);
1294 else {
1295 err = -EINVAL;
1296 kfree_skb(skb);
1297 }
1298 return err;
1299}
1300EXPORT_SYMBOL(neigh_connected_output);
1301
1302int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1303{
1304 return dev_queue_xmit(skb);
1305}
1306EXPORT_SYMBOL(neigh_direct_output);
1307
1308static void neigh_proxy_process(unsigned long arg)
1309{
1310 struct neigh_table *tbl = (struct neigh_table *)arg;
1311 long sched_next = 0;
1312 unsigned long now = jiffies;
1313 struct sk_buff *skb, *n;
1314
1315 spin_lock(&tbl->proxy_queue.lock);
1316
1317 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1318 long tdif = NEIGH_CB(skb)->sched_next - now;
1319
1320 if (tdif <= 0) {
1321 struct net_device *dev = skb->dev;
1322
1323 __skb_unlink(skb, &tbl->proxy_queue);
1324 if (tbl->proxy_redo && netif_running(dev)) {
1325 rcu_read_lock();
1326 tbl->proxy_redo(skb);
1327 rcu_read_unlock();
1328 } else {
1329 kfree_skb(skb);
1330 }
1331
1332 dev_put(dev);
1333 } else if (!sched_next || tdif < sched_next)
1334 sched_next = tdif;
1335 }
1336 del_timer(&tbl->proxy_timer);
1337 if (sched_next)
1338 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1339 spin_unlock(&tbl->proxy_queue.lock);
1340}
1341
1342void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1343 struct sk_buff *skb)
1344{
1345 unsigned long now = jiffies;
1346 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1347
1348 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1349 kfree_skb(skb);
1350 return;
1351 }
1352
1353 NEIGH_CB(skb)->sched_next = sched_next;
1354 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1355
1356 spin_lock(&tbl->proxy_queue.lock);
1357 if (del_timer(&tbl->proxy_timer)) {
1358 if (time_before(tbl->proxy_timer.expires, sched_next))
1359 sched_next = tbl->proxy_timer.expires;
1360 }
1361 skb_dst_drop(skb);
1362 dev_hold(skb->dev);
1363 __skb_queue_tail(&tbl->proxy_queue, skb);
1364 mod_timer(&tbl->proxy_timer, sched_next);
1365 spin_unlock(&tbl->proxy_queue.lock);
1366}
1367EXPORT_SYMBOL(pneigh_enqueue);
1368
1369static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1370 struct net *net, int ifindex)
1371{
1372 struct neigh_parms *p;
1373
1374 for (p = &tbl->parms; p; p = p->next) {
1375 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1376 (!p->dev && !ifindex))
1377 return p;
1378 }
1379
1380 return NULL;
1381}
1382
1383struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1384 struct neigh_table *tbl)
1385{
1386 struct neigh_parms *p, *ref;
1387 struct net *net = dev_net(dev);
1388 const struct net_device_ops *ops = dev->netdev_ops;
1389
1390 ref = lookup_neigh_parms(tbl, net, 0);
1391 if (!ref)
1392 return NULL;
1393
1394 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1395 if (p) {
1396 p->tbl = tbl;
1397 atomic_set(&p->refcnt, 1);
1398 p->reachable_time =
1399 neigh_rand_reach_time(p->base_reachable_time);
1400
1401 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1402 kfree(p);
1403 return NULL;
1404 }
1405
1406 dev_hold(dev);
1407 p->dev = dev;
1408 write_pnet(&p->net, hold_net(net));
1409 p->sysctl_table = NULL;
1410 write_lock_bh(&tbl->lock);
1411 p->next = tbl->parms.next;
1412 tbl->parms.next = p;
1413 write_unlock_bh(&tbl->lock);
1414 }
1415 return p;
1416}
1417EXPORT_SYMBOL(neigh_parms_alloc);
1418
1419static void neigh_rcu_free_parms(struct rcu_head *head)
1420{
1421 struct neigh_parms *parms =
1422 container_of(head, struct neigh_parms, rcu_head);
1423
1424 neigh_parms_put(parms);
1425}
1426
1427void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1428{
1429 struct neigh_parms **p;
1430
1431 if (!parms || parms == &tbl->parms)
1432 return;
1433 write_lock_bh(&tbl->lock);
1434 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1435 if (*p == parms) {
1436 *p = parms->next;
1437 parms->dead = 1;
1438 write_unlock_bh(&tbl->lock);
1439 if (parms->dev)
1440 dev_put(parms->dev);
1441 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1442 return;
1443 }
1444 }
1445 write_unlock_bh(&tbl->lock);
1446 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1447}
1448EXPORT_SYMBOL(neigh_parms_release);
1449
1450static void neigh_parms_destroy(struct neigh_parms *parms)
1451{
1452 release_net(neigh_parms_net(parms));
1453 kfree(parms);
1454}
1455
1456static struct lock_class_key neigh_table_proxy_queue_class;
1457
1458void neigh_table_init_no_netlink(struct neigh_table *tbl)
1459{
1460 unsigned long now = jiffies;
1461 unsigned long phsize;
1462
1463 write_pnet(&tbl->parms.net, &init_net);
1464 atomic_set(&tbl->parms.refcnt, 1);
1465 tbl->parms.reachable_time =
1466 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1467
1468 if (!tbl->kmem_cachep)
1469 tbl->kmem_cachep =
1470 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1471 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1472 NULL);
1473 tbl->stats = alloc_percpu(struct neigh_statistics);
1474 if (!tbl->stats)
1475 panic("cannot create neighbour cache statistics");
1476
1477#ifdef CONFIG_PROC_FS
1478 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1479 &neigh_stat_seq_fops, tbl))
1480 panic("cannot create neighbour proc dir entry");
1481#endif
1482
1483 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1484
1485 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1486 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1487
1488 if (!tbl->nht || !tbl->phash_buckets)
1489 panic("cannot allocate neighbour cache hashes");
1490
1491 rwlock_init(&tbl->lock);
1492 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1493 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1494 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1495 skb_queue_head_init_class(&tbl->proxy_queue,
1496 &neigh_table_proxy_queue_class);
1497
1498 tbl->last_flush = now;
1499 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1500}
1501EXPORT_SYMBOL(neigh_table_init_no_netlink);
1502
1503void neigh_table_init(struct neigh_table *tbl)
1504{
1505 struct neigh_table *tmp;
1506
1507 neigh_table_init_no_netlink(tbl);
1508 write_lock(&neigh_tbl_lock);
1509 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1510 if (tmp->family == tbl->family)
1511 break;
1512 }
1513 tbl->next = neigh_tables;
1514 neigh_tables = tbl;
1515 write_unlock(&neigh_tbl_lock);
1516
1517 if (unlikely(tmp)) {
1518 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1519 "family %d\n", tbl->family);
1520 dump_stack();
1521 }
1522}
1523EXPORT_SYMBOL(neigh_table_init);
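/*
 * Usage sketch (editor's note): a protocol typically declares a static
 * struct neigh_table with .family, .key_len, .hash, .constructor and the
 * gc thresholds filled in, then calls neigh_table_init() once at init time;
 * ARP and IPv6 neighbour discovery both register their tables this way.
 */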
1524
1525int neigh_table_clear(struct neigh_table *tbl)
1526{
1527 struct neigh_table **tp;
1528
1529 /* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1530 cancel_delayed_work_sync(&tbl->gc_work);
1531 del_timer_sync(&tbl->proxy_timer);
1532 pneigh_queue_purge(&tbl->proxy_queue);
1533 neigh_ifdown(tbl, NULL);
1534 if (atomic_read(&tbl->entries))
1535 printk(KERN_CRIT "neighbour leakage\n");
1536 write_lock(&neigh_tbl_lock);
1537 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1538 if (*tp == tbl) {
1539 *tp = tbl->next;
1540 break;
1541 }
1542 }
1543 write_unlock(&neigh_tbl_lock);
1544
1545 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1546 neigh_hash_free_rcu);
1547 tbl->nht = NULL;
1548
1549 kfree(tbl->phash_buckets);
1550 tbl->phash_buckets = NULL;
1551
1552 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1553
1554 free_percpu(tbl->stats);
1555 tbl->stats = NULL;
1556
1557 kmem_cache_destroy(tbl->kmem_cachep);
1558 tbl->kmem_cachep = NULL;
1559
1560 return 0;
1561}
1562EXPORT_SYMBOL(neigh_table_clear);
1563
1564static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1565{
1566 struct net *net = sock_net(skb->sk);
1567 struct ndmsg *ndm;
1568 struct nlattr *dst_attr;
1569 struct neigh_table *tbl;
1570 struct net_device *dev = NULL;
1571 int err = -EINVAL;
1572
1573 ASSERT_RTNL();
1574 if (nlmsg_len(nlh) < sizeof(*ndm))
1575 goto out;
1576
1577 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1578 if (dst_attr == NULL)
1579 goto out;
1580
1581 ndm = nlmsg_data(nlh);
1582 if (ndm->ndm_ifindex) {
1583 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1584 if (dev == NULL) {
1585 err = -ENODEV;
1586 goto out;
1587 }
1588 }
1589
1590 read_lock(&neigh_tbl_lock);
1591 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1592 struct neighbour *neigh;
1593
1594 if (tbl->family != ndm->ndm_family)
1595 continue;
1596 read_unlock(&neigh_tbl_lock);
1597
1598 if (nla_len(dst_attr) < tbl->key_len)
1599 goto out;
1600
1601 if (ndm->ndm_flags & NTF_PROXY) {
1602 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1603 goto out;
1604 }
1605
1606 if (dev == NULL)
1607 goto out;
1608
1609 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1610 if (neigh == NULL) {
1611 err = -ENOENT;
1612 goto out;
1613 }
1614
1615 err = neigh_update(neigh, NULL, NUD_FAILED,
1616 NEIGH_UPDATE_F_OVERRIDE |
1617 NEIGH_UPDATE_F_ADMIN);
1618 neigh_release(neigh);
1619 goto out;
1620 }
1621 read_unlock(&neigh_tbl_lock);
1622 err = -EAFNOSUPPORT;
1623
1624out:
1625 return err;
1626}
1627
1628static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1629{
1630 struct net *net = sock_net(skb->sk);
1631 struct ndmsg *ndm;
1632 struct nlattr *tb[NDA_MAX+1];
1633 struct neigh_table *tbl;
1634 struct net_device *dev = NULL;
1635 int err;
1636
1637 ASSERT_RTNL();
1638 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1639 if (err < 0)
1640 goto out;
1641
1642 err = -EINVAL;
1643 if (tb[NDA_DST] == NULL)
1644 goto out;
1645
1646 ndm = nlmsg_data(nlh);
1647 if (ndm->ndm_ifindex) {
1648 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1649 if (dev == NULL) {
1650 err = -ENODEV;
1651 goto out;
1652 }
1653
1654 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1655 goto out;
1656 }
1657
1658 read_lock(&neigh_tbl_lock);
1659 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1660 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1661 struct neighbour *neigh;
1662 void *dst, *lladdr;
1663
1664 if (tbl->family != ndm->ndm_family)
1665 continue;
1666 read_unlock(&neigh_tbl_lock);
1667
1668 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1669 goto out;
1670 dst = nla_data(tb[NDA_DST]);
1671 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1672
1673 if (ndm->ndm_flags & NTF_PROXY) {
1674 struct pneigh_entry *pn;
1675
1676 err = -ENOBUFS;
1677 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1678 if (pn) {
1679 pn->flags = ndm->ndm_flags;
1680 err = 0;
1681 }
1682 goto out;
1683 }
1684
1685 if (dev == NULL)
1686 goto out;
1687
1688 neigh = neigh_lookup(tbl, dst, dev);
1689 if (neigh == NULL) {
1690 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1691 err = -ENOENT;
1692 goto out;
1693 }
1694
1695 neigh = __neigh_lookup_errno(tbl, dst, dev);
1696 if (IS_ERR(neigh)) {
1697 err = PTR_ERR(neigh);
1698 goto out;
1699 }
1700 } else {
1701 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1702 err = -EEXIST;
1703 neigh_release(neigh);
1704 goto out;
1705 }
1706
1707 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1708 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1709 }
1710
1711 if (ndm->ndm_flags & NTF_USE) {
1712 neigh_event_send(neigh, NULL);
1713 err = 0;
1714 } else
1715 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1716 neigh_release(neigh);
1717 goto out;
1718 }
1719
1720 read_unlock(&neigh_tbl_lock);
1721 err = -EAFNOSUPPORT;
1722out:
1723 return err;
1724}
1725
1726static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1727{
1728 struct nlattr *nest;
1729
1730 nest = nla_nest_start(skb, NDTA_PARMS);
1731 if (nest == NULL)
1732 return -ENOBUFS;
1733
1734 if (parms->dev)
1735 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1736
1737 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1738 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1739 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1740 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1741 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1742 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1743 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1744 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1745 parms->base_reachable_time);
1746 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1747 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1748 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1749 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1750 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1751 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1752
1753 return nla_nest_end(skb, nest);
1754
1755nla_put_failure:
1756 nla_nest_cancel(skb, nest);
1757 return -EMSGSIZE;
1758}
1759
1760static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1761 u32 pid, u32 seq, int type, int flags)
1762{
1763 struct nlmsghdr *nlh;
1764 struct ndtmsg *ndtmsg;
1765
1766 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1767 if (nlh == NULL)
1768 return -EMSGSIZE;
1769
1770 ndtmsg = nlmsg_data(nlh);
1771
1772 read_lock_bh(&tbl->lock);
1773 ndtmsg->ndtm_family = tbl->family;
1774 ndtmsg->ndtm_pad1 = 0;
1775 ndtmsg->ndtm_pad2 = 0;
1776
1777 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1778 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1779 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1780 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1781 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1782
1783 {
1784 unsigned long now = jiffies;
1785 unsigned int flush_delta = now - tbl->last_flush;
1786 unsigned int rand_delta = now - tbl->last_rand;
1787 struct neigh_hash_table *nht;
1788 struct ndt_config ndc = {
1789 .ndtc_key_len = tbl->key_len,
1790 .ndtc_entry_size = tbl->entry_size,
1791 .ndtc_entries = atomic_read(&tbl->entries),
1792 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1793 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1794 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1795 };
1796
1797 rcu_read_lock_bh();
1798 nht = rcu_dereference_bh(tbl->nht);
1799 ndc.ndtc_hash_rnd = nht->hash_rnd;
1800 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1801 rcu_read_unlock_bh();
1802
1803 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1804 }
1805
1806 {
1807 int cpu;
1808 struct ndt_stats ndst;
1809
1810 memset(&ndst, 0, sizeof(ndst));
1811
1812 for_each_possible_cpu(cpu) {
1813 struct neigh_statistics *st;
1814
1815 st = per_cpu_ptr(tbl->stats, cpu);
1816 ndst.ndts_allocs += st->allocs;
1817 ndst.ndts_destroys += st->destroys;
1818 ndst.ndts_hash_grows += st->hash_grows;
1819 ndst.ndts_res_failed += st->res_failed;
1820 ndst.ndts_lookups += st->lookups;
1821 ndst.ndts_hits += st->hits;
1822 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1823 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1824 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1825 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1826 }
1827
1828 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1829 }
1830
1831 BUG_ON(tbl->parms.dev);
1832 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1833 goto nla_put_failure;
1834
1835 read_unlock_bh(&tbl->lock);
1836 return nlmsg_end(skb, nlh);
1837
1838nla_put_failure:
1839 read_unlock_bh(&tbl->lock);
1840 nlmsg_cancel(skb, nlh);
1841 return -EMSGSIZE;
1842}
1843
1844static int neightbl_fill_param_info(struct sk_buff *skb,
1845 struct neigh_table *tbl,
1846 struct neigh_parms *parms,
1847 u32 pid, u32 seq, int type,
1848 unsigned int flags)
1849{
1850 struct ndtmsg *ndtmsg;
1851 struct nlmsghdr *nlh;
1852
1853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1854 if (nlh == NULL)
1855 return -EMSGSIZE;
1856
1857 ndtmsg = nlmsg_data(nlh);
1858
1859 read_lock_bh(&tbl->lock);
1860 ndtmsg->ndtm_family = tbl->family;
1861 ndtmsg->ndtm_pad1 = 0;
1862 ndtmsg->ndtm_pad2 = 0;
1863
1864 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1865 neightbl_fill_parms(skb, parms) < 0)
1866 goto errout;
1867
1868 read_unlock_bh(&tbl->lock);
1869 return nlmsg_end(skb, nlh);
1870errout:
1871 read_unlock_bh(&tbl->lock);
1872 nlmsg_cancel(skb, nlh);
1873 return -EMSGSIZE;
1874}
1875
1876static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1877 [NDTA_NAME] = { .type = NLA_STRING },
1878 [NDTA_THRESH1] = { .type = NLA_U32 },
1879 [NDTA_THRESH2] = { .type = NLA_U32 },
1880 [NDTA_THRESH3] = { .type = NLA_U32 },
1881 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1882 [NDTA_PARMS] = { .type = NLA_NESTED },
1883};
1884
1885static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1886 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1887 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1888 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1889 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1890 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1891 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1892 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1893 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1894 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1895 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1896 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1897 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1898 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1899};
1900
1901static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1902{
1903 struct net *net = sock_net(skb->sk);
1904 struct neigh_table *tbl;
1905 struct ndtmsg *ndtmsg;
1906 struct nlattr *tb[NDTA_MAX+1];
1907 int err;
1908
1909 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1910 nl_neightbl_policy);
1911 if (err < 0)
1912 goto errout;
1913
1914 if (tb[NDTA_NAME] == NULL) {
1915 err = -EINVAL;
1916 goto errout;
1917 }
1918
1919 ndtmsg = nlmsg_data(nlh);
1920 read_lock(&neigh_tbl_lock);
1921 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1922 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1923 continue;
1924
1925 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1926 break;
1927 }
1928
1929 if (tbl == NULL) {
1930 err = -ENOENT;
1931 goto errout_locked;
1932 }
1933
1934 /*
1935 * We acquire tbl->lock to be nice to the periodic timers and
1936 * make sure they always see a consistent set of values.
1937 */
1938 write_lock_bh(&tbl->lock);
1939
1940 if (tb[NDTA_PARMS]) {
1941 struct nlattr *tbp[NDTPA_MAX+1];
1942 struct neigh_parms *p;
1943 int i, ifindex = 0;
1944
1945 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1946 nl_ntbl_parm_policy);
1947 if (err < 0)
1948 goto errout_tbl_lock;
1949
1950 if (tbp[NDTPA_IFINDEX])
1951 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1952
1953 p = lookup_neigh_parms(tbl, net, ifindex);
1954 if (p == NULL) {
1955 err = -ENOENT;
1956 goto errout_tbl_lock;
1957 }
1958
1959 for (i = 1; i <= NDTPA_MAX; i++) {
1960 if (tbp[i] == NULL)
1961 continue;
1962
1963 switch (i) {
1964 case NDTPA_QUEUE_LEN:
1965 p->queue_len = nla_get_u32(tbp[i]);
1966 break;
1967 case NDTPA_PROXY_QLEN:
1968 p->proxy_qlen = nla_get_u32(tbp[i]);
1969 break;
1970 case NDTPA_APP_PROBES:
1971 p->app_probes = nla_get_u32(tbp[i]);
1972 break;
1973 case NDTPA_UCAST_PROBES:
1974 p->ucast_probes = nla_get_u32(tbp[i]);
1975 break;
1976 case NDTPA_MCAST_PROBES:
1977 p->mcast_probes = nla_get_u32(tbp[i]);
1978 break;
1979 case NDTPA_BASE_REACHABLE_TIME:
1980 p->base_reachable_time = nla_get_msecs(tbp[i]);
1981 break;
1982 case NDTPA_GC_STALETIME:
1983 p->gc_staletime = nla_get_msecs(tbp[i]);
1984 break;
1985 case NDTPA_DELAY_PROBE_TIME:
1986 p->delay_probe_time = nla_get_msecs(tbp[i]);
1987 break;
1988 case NDTPA_RETRANS_TIME:
1989 p->retrans_time = nla_get_msecs(tbp[i]);
1990 break;
1991 case NDTPA_ANYCAST_DELAY:
1992 p->anycast_delay = nla_get_msecs(tbp[i]);
1993 break;
1994 case NDTPA_PROXY_DELAY:
1995 p->proxy_delay = nla_get_msecs(tbp[i]);
1996 break;
1997 case NDTPA_LOCKTIME:
1998 p->locktime = nla_get_msecs(tbp[i]);
1999 break;
2000 }
2001 }
2002 }
2003
2004 if (tb[NDTA_THRESH1])
2005 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2006
2007 if (tb[NDTA_THRESH2])
2008 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2009
2010 if (tb[NDTA_THRESH3])
2011 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2012
2013 if (tb[NDTA_GC_INTERVAL])
2014 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2015
2016 err = 0;
2017
2018errout_tbl_lock:
2019 write_unlock_bh(&tbl->lock);
2020errout_locked:
2021 read_unlock(&neigh_tbl_lock);
2022errout:
2023 return err;
2024}
2025
2026static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2027{
2028 struct net *net = sock_net(skb->sk);
2029 int family, tidx, nidx = 0;
2030 int tbl_skip = cb->args[0];
2031 int neigh_skip = cb->args[1];
2032 struct neigh_table *tbl;
2033
2034 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2035
2036 read_lock(&neigh_tbl_lock);
2037 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2038 struct neigh_parms *p;
2039
2040 if (tidx < tbl_skip || (family && tbl->family != family))
2041 continue;
2042
2043 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2044 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2045 NLM_F_MULTI) <= 0)
2046 break;
2047
2048 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2049 if (!net_eq(neigh_parms_net(p), net))
2050 continue;
2051
2052 if (nidx < neigh_skip)
2053 goto next;
2054
2055 if (neightbl_fill_param_info(skb, tbl, p,
2056 NETLINK_CB(cb->skb).pid,
2057 cb->nlh->nlmsg_seq,
2058 RTM_NEWNEIGHTBL,
2059 NLM_F_MULTI) <= 0)
2060 goto out;
2061 next:
2062 nidx++;
2063 }
2064
2065 neigh_skip = 0;
2066 }
2067out:
2068 read_unlock(&neigh_tbl_lock);
2069 cb->args[0] = tidx;
2070 cb->args[1] = nidx;
2071
2072 return skb->len;
2073}
2074
2075static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2076 u32 pid, u32 seq, int type, unsigned int flags)
2077{
2078 unsigned long now = jiffies;
2079 struct nda_cacheinfo ci;
2080 struct nlmsghdr *nlh;
2081 struct ndmsg *ndm;
2082
2083 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2084 if (nlh == NULL)
2085 return -EMSGSIZE;
2086
2087 ndm = nlmsg_data(nlh);
2088 ndm->ndm_family = neigh->ops->family;
2089 ndm->ndm_pad1 = 0;
2090 ndm->ndm_pad2 = 0;
2091 ndm->ndm_flags = neigh->flags;
2092 ndm->ndm_type = neigh->type;
2093 ndm->ndm_ifindex = neigh->dev->ifindex;
2094
2095 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2096
2097 read_lock_bh(&neigh->lock);
2098 ndm->ndm_state = neigh->nud_state;
2099 if (neigh->nud_state & NUD_VALID) {
2100 char haddr[MAX_ADDR_LEN];
2101
2102 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2103 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2104 read_unlock_bh(&neigh->lock);
2105 goto nla_put_failure;
2106 }
2107 }
2108
2109 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2110 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2111 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2112 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2113 read_unlock_bh(&neigh->lock);
2114
2115 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2116 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2117
2118 return nlmsg_end(skb, nlh);
2119
2120nla_put_failure:
2121 nlmsg_cancel(skb, nlh);
2122 return -EMSGSIZE;
2123}
2124
2125static void neigh_update_notify(struct neighbour *neigh)
2126{
2127 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2128 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2129}
2130
2131static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2132 struct netlink_callback *cb)
2133{
2134 struct net *net = sock_net(skb->sk);
2135 struct neighbour *n;
2136 int rc, h, s_h = cb->args[1];
2137 int idx, s_idx = idx = cb->args[2];
2138 struct neigh_hash_table *nht;
2139
2140 rcu_read_lock_bh();
2141 nht = rcu_dereference_bh(tbl->nht);
2142
2143 for (h = 0; h < (1 << nht->hash_shift); h++) {
2144 if (h < s_h)
2145 continue;
2146 if (h > s_h)
2147 s_idx = 0;
2148 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2149 n != NULL;
2150 n = rcu_dereference_bh(n->next)) {
2151 if (!net_eq(dev_net(n->dev), net))
2152 continue;
2153 if (idx < s_idx)
2154 goto next;
2155 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2156 cb->nlh->nlmsg_seq,
2157 RTM_NEWNEIGH,
2158 NLM_F_MULTI) <= 0) {
2159 rc = -1;
2160 goto out;
2161 }
2162next:
2163 idx++;
2164 }
2165 }
2166 rc = skb->len;
2167out:
2168 rcu_read_unlock_bh();
2169 cb->args[1] = h;
2170 cb->args[2] = idx;
2171 return rc;
2172}
2173
2174static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2175{
2176 struct neigh_table *tbl;
2177 int t, family, s_t;
2178
2179 read_lock(&neigh_tbl_lock);
2180 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2181 s_t = cb->args[0];
2182
2183 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2184 if (t < s_t || (family && tbl->family != family))
2185 continue;
2186 if (t > s_t)
2187 memset(&cb->args[1], 0, sizeof(cb->args) -
2188 sizeof(cb->args[0]));
2189 if (neigh_dump_table(tbl, skb, cb) < 0)
2190 break;
2191 }
2192 read_unlock(&neigh_tbl_lock);
2193
2194 cb->args[0] = t;
2195 return skb->len;
2196}
2197
2198void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2199{
2200 int chain;
2201 struct neigh_hash_table *nht;
2202
2203 rcu_read_lock_bh();
2204 nht = rcu_dereference_bh(tbl->nht);
2205
2206 read_lock(&tbl->lock); /* avoid resizes */
2207 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2208 struct neighbour *n;
2209
2210 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2211 n != NULL;
2212 n = rcu_dereference_bh(n->next))
2213 cb(n, cookie);
2214 }
2215 read_unlock(&tbl->lock);
2216 rcu_read_unlock_bh();
2217}
2218EXPORT_SYMBOL(neigh_for_each);
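
/*
 * Illustrative only: a minimal sketch of how a caller might use
 * neigh_for_each() above, e.g. to count reachable entries. The names
 * below are hypothetical; arp_tbl is mentioned merely as a plausible
 * table to iterate.
 */
struct neigh_reach_count {
	int reachable;
};

static void neigh_count_reachable_cb(struct neighbour *n, void *cookie)
{
	struct neigh_reach_count *count = cookie;

	if (n->nud_state & NUD_REACHABLE)
		count->reachable++;
}

/*
 * Usage sketch:
 *	struct neigh_reach_count count = { .reachable = 0 };
 *	neigh_for_each(&arp_tbl, neigh_count_reachable_cb, &count);
 */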
2219
2220/* The tbl->lock must be held as a writer and BH disabled. */
2221void __neigh_for_each_release(struct neigh_table *tbl,
2222 int (*cb)(struct neighbour *))
2223{
2224 int chain;
2225 struct neigh_hash_table *nht;
2226
2227 nht = rcu_dereference_protected(tbl->nht,
2228 lockdep_is_held(&tbl->lock));
2229 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2230 struct neighbour *n;
2231 struct neighbour __rcu **np;
2232
2233 np = &nht->hash_buckets[chain];
2234 while ((n = rcu_dereference_protected(*np,
2235 lockdep_is_held(&tbl->lock))) != NULL) {
2236 int release;
2237
2238 write_lock(&n->lock);
2239 release = cb(n);
2240 if (release) {
2241 rcu_assign_pointer(*np,
2242 rcu_dereference_protected(n->next,
2243 lockdep_is_held(&tbl->lock)));
2244 n->dead = 1;
2245 } else
2246 np = &n->next;
2247 write_unlock(&n->lock);
2248 if (release)
2249 neigh_cleanup_and_release(n);
2250 }
2251 }
2252}
2253EXPORT_SYMBOL(__neigh_for_each_release);
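
/*
 * Illustrative only: a minimal sketch of a release callback for the
 * helper above; a non-zero return asks for the entry to be unlinked and
 * released. The function name is hypothetical.
 */
static int example_release_if_failed(struct neighbour *n)
{
	return (n->nud_state & NUD_FAILED) != 0;
}

/*
 * Usage sketch (with tbl->lock write-held and BH disabled, as required):
 *	__neigh_for_each_release(tbl, example_release_if_failed);
 */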
2254
2255#ifdef CONFIG_PROC_FS
2256
2257static struct neighbour *neigh_get_first(struct seq_file *seq)
2258{
2259 struct neigh_seq_state *state = seq->private;
2260 struct net *net = seq_file_net(seq);
2261 struct neigh_hash_table *nht = state->nht;
2262 struct neighbour *n = NULL;
2263 int bucket = state->bucket;
2264
2265 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2266 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2267 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2268
2269 while (n) {
2270 if (!net_eq(dev_net(n->dev), net))
2271 goto next;
2272 if (state->neigh_sub_iter) {
2273 loff_t fakep = 0;
2274 void *v;
2275
2276 v = state->neigh_sub_iter(state, n, &fakep);
2277 if (!v)
2278 goto next;
2279 }
2280 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2281 break;
2282 if (n->nud_state & ~NUD_NOARP)
2283 break;
2284next:
2285 n = rcu_dereference_bh(n->next);
2286 }
2287
2288 if (n)
2289 break;
2290 }
2291 state->bucket = bucket;
2292
2293 return n;
2294}
2295
2296static struct neighbour *neigh_get_next(struct seq_file *seq,
2297 struct neighbour *n,
2298 loff_t *pos)
2299{
2300 struct neigh_seq_state *state = seq->private;
2301 struct net *net = seq_file_net(seq);
2302 struct neigh_hash_table *nht = state->nht;
2303
2304 if (state->neigh_sub_iter) {
2305 void *v = state->neigh_sub_iter(state, n, pos);
2306 if (v)
2307 return n;
2308 }
2309 n = rcu_dereference_bh(n->next);
2310
2311 while (1) {
2312 while (n) {
2313 if (!net_eq(dev_net(n->dev), net))
2314 goto next;
2315 if (state->neigh_sub_iter) {
2316 void *v = state->neigh_sub_iter(state, n, pos);
2317 if (v)
2318 return n;
2319 goto next;
2320 }
2321 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2322 break;
2323
2324 if (n->nud_state & ~NUD_NOARP)
2325 break;
2326next:
2327 n = rcu_dereference_bh(n->next);
2328 }
2329
2330 if (n)
2331 break;
2332
2333 if (++state->bucket >= (1 << nht->hash_shift))
2334 break;
2335
2336 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2337 }
2338
2339 if (n && pos)
2340 --(*pos);
2341 return n;
2342}
2343
2344static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2345{
2346 struct neighbour *n = neigh_get_first(seq);
2347
2348 if (n) {
2349 --(*pos);
2350 while (*pos) {
2351 n = neigh_get_next(seq, n, pos);
2352 if (!n)
2353 break;
2354 }
2355 }
2356 return *pos ? NULL : n;
2357}
2358
2359static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2360{
2361 struct neigh_seq_state *state = seq->private;
2362 struct net *net = seq_file_net(seq);
2363 struct neigh_table *tbl = state->tbl;
2364 struct pneigh_entry *pn = NULL;
2365 int bucket = state->bucket;
2366
2367 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2368 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2369 pn = tbl->phash_buckets[bucket];
2370 while (pn && !net_eq(pneigh_net(pn), net))
2371 pn = pn->next;
2372 if (pn)
2373 break;
2374 }
2375 state->bucket = bucket;
2376
2377 return pn;
2378}
2379
2380static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2381 struct pneigh_entry *pn,
2382 loff_t *pos)
2383{
2384 struct neigh_seq_state *state = seq->private;
2385 struct net *net = seq_file_net(seq);
2386 struct neigh_table *tbl = state->tbl;
2387
2388 pn = pn->next;
2389 while (!pn) {
2390 if (++state->bucket > PNEIGH_HASHMASK)
2391 break;
2392 pn = tbl->phash_buckets[state->bucket];
2393 while (pn && !net_eq(pneigh_net(pn), net))
2394 pn = pn->next;
2395 if (pn)
2396 break;
2397 }
2398
2399 if (pn && pos)
2400 --(*pos);
2401
2402 return pn;
2403}
2404
2405static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2406{
2407 struct pneigh_entry *pn = pneigh_get_first(seq);
2408
2409 if (pn) {
2410 --(*pos);
2411 while (*pos) {
2412 pn = pneigh_get_next(seq, pn, pos);
2413 if (!pn)
2414 break;
2415 }
2416 }
2417 return *pos ? NULL : pn;
2418}
2419
2420static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2421{
2422 struct neigh_seq_state *state = seq->private;
2423 void *rc;
2424 loff_t idxpos = *pos;
2425
2426 rc = neigh_get_idx(seq, &idxpos);
2427 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2428 rc = pneigh_get_idx(seq, &idxpos);
2429
2430 return rc;
2431}
2432
2433void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2434 __acquires(rcu_bh)
2435{
2436 struct neigh_seq_state *state = seq->private;
2437
2438 state->tbl = tbl;
2439 state->bucket = 0;
2440 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2441
2442 rcu_read_lock_bh();
2443 state->nht = rcu_dereference_bh(tbl->nht);
2444
2445 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2446}
2447EXPORT_SYMBOL(neigh_seq_start);
2448
2449void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2450{
2451 struct neigh_seq_state *state;
2452 void *rc;
2453
2454 if (v == SEQ_START_TOKEN) {
2455 rc = neigh_get_first(seq);
2456 goto out;
2457 }
2458
2459 state = seq->private;
2460 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2461 rc = neigh_get_next(seq, v, NULL);
2462 if (rc)
2463 goto out;
2464 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2465 rc = pneigh_get_first(seq);
2466 } else {
2467 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2468 rc = pneigh_get_next(seq, v, NULL);
2469 }
2470out:
2471 ++(*pos);
2472 return rc;
2473}
2474EXPORT_SYMBOL(neigh_seq_next);
2475
2476void neigh_seq_stop(struct seq_file *seq, void *v)
2477 __releases(rcu_bh)
2478{
2479 rcu_read_unlock_bh();
2480}
2481EXPORT_SYMBOL(neigh_seq_stop);
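
/*
 * Illustrative only: a minimal sketch of how a protocol could wire the
 * generic iterators above into its own seq_file. Every "example_" name,
 * including example_tbl, is hypothetical; the open routine is expected to
 * allocate a struct neigh_seq_state as seq->private, e.g. via
 * seq_open_net(inode, file, &example_seq_ops, sizeof(struct neigh_seq_state)).
 */
static struct neigh_table example_tbl;

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* Skip NUD_NOARP entries, as /proc/net/arp does. */
	return neigh_seq_start(seq, pos, &example_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	if (v != SEQ_START_TOKEN)
		seq_printf(seq, "neigh %p\n", v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= example_seq_show,
};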
2482
2483/* statistics via seq_file */
2484
2485static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2486{
2487 struct neigh_table *tbl = seq->private;
2488 int cpu;
2489
2490 if (*pos == 0)
2491 return SEQ_START_TOKEN;
2492
2493 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2494 if (!cpu_possible(cpu))
2495 continue;
2496 *pos = cpu+1;
2497 return per_cpu_ptr(tbl->stats, cpu);
2498 }
2499 return NULL;
2500}
2501
2502static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2503{
2504 struct neigh_table *tbl = seq->private;
2505 int cpu;
2506
2507 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2508 if (!cpu_possible(cpu))
2509 continue;
2510 *pos = cpu+1;
2511 return per_cpu_ptr(tbl->stats, cpu);
2512 }
2513 return NULL;
2514}
2515
2516static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2517{
2518
2519}
2520
2521static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2522{
2523 struct neigh_table *tbl = seq->private;
2524 struct neigh_statistics *st = v;
2525
2526 if (v == SEQ_START_TOKEN) {
2527 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2528 return 0;
2529 }
2530
2531 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2532 "%08lx %08lx %08lx %08lx %08lx\n",
2533 atomic_read(&tbl->entries),
2534
2535 st->allocs,
2536 st->destroys,
2537 st->hash_grows,
2538
2539 st->lookups,
2540 st->hits,
2541
2542 st->res_failed,
2543
2544 st->rcv_probes_mcast,
2545 st->rcv_probes_ucast,
2546
2547 st->periodic_gc_runs,
2548 st->forced_gc_runs,
2549 st->unres_discards
2550 );
2551
2552 return 0;
2553}
2554
2555static const struct seq_operations neigh_stat_seq_ops = {
2556 .start = neigh_stat_seq_start,
2557 .next = neigh_stat_seq_next,
2558 .stop = neigh_stat_seq_stop,
2559 .show = neigh_stat_seq_show,
2560};
2561
2562static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2563{
2564 int ret = seq_open(file, &neigh_stat_seq_ops);
2565
2566 if (!ret) {
2567 struct seq_file *sf = file->private_data;
2568 sf->private = PDE(inode)->data;
2569 }
2570 return ret;
2571};
2572
2573static const struct file_operations neigh_stat_seq_fops = {
2574 .owner = THIS_MODULE,
2575 .open = neigh_stat_seq_open,
2576 .read = seq_read,
2577 .llseek = seq_lseek,
2578 .release = seq_release,
2579};
2580
2581#endif /* CONFIG_PROC_FS */
2582
2583static inline size_t neigh_nlmsg_size(void)
2584{
2585 return NLMSG_ALIGN(sizeof(struct ndmsg))
2586 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2587 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2588 + nla_total_size(sizeof(struct nda_cacheinfo))
2589 + nla_total_size(4); /* NDA_PROBES */
2590}
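
/*
 * Worked example (illustrative): with MAX_ADDR_LEN == 32 this comes to
 * NLMSG_ALIGN(12) + 36 + 36 + 20 + 8 = 112 bytes -- 12 for struct ndmsg,
 * 36 each for the NDA_DST and NDA_LLADDR attributes (4-byte nlattr header
 * plus 32 bytes of payload), 20 for NDA_CACHEINFO and 8 for NDA_PROBES.
 * This small, fixed upper bound is what makes the GFP_ATOMIC allocation
 * in __neigh_notify() below reasonable.
 */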
2591
2592static void __neigh_notify(struct neighbour *n, int type, int flags)
2593{
2594 struct net *net = dev_net(n->dev);
2595 struct sk_buff *skb;
2596 int err = -ENOBUFS;
2597
2598 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2599 if (skb == NULL)
2600 goto errout;
2601
2602 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2603 if (err < 0) {
2604 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2605 WARN_ON(err == -EMSGSIZE);
2606 kfree_skb(skb);
2607 goto errout;
2608 }
2609 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2610 return;
2611errout:
2612 if (err < 0)
2613 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2614}
2615
2616#ifdef CONFIG_ARPD
2617void neigh_app_ns(struct neighbour *n)
2618{
2619 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2620}
2621EXPORT_SYMBOL(neigh_app_ns);
2622#endif /* CONFIG_ARPD */
2623
2624#ifdef CONFIG_SYSCTL
2625
2626#define NEIGH_VARS_MAX 19
2627
2628static struct neigh_sysctl_table {
2629 struct ctl_table_header *sysctl_header;
2630 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2631 char *dev_name;
2632} neigh_sysctl_template __read_mostly = {
2633 .neigh_vars = {
2634 {
2635 .procname = "mcast_solicit",
2636 .maxlen = sizeof(int),
2637 .mode = 0644,
2638 .proc_handler = proc_dointvec,
2639 },
2640 {
2641 .procname = "ucast_solicit",
2642 .maxlen = sizeof(int),
2643 .mode = 0644,
2644 .proc_handler = proc_dointvec,
2645 },
2646 {
2647 .procname = "app_solicit",
2648 .maxlen = sizeof(int),
2649 .mode = 0644,
2650 .proc_handler = proc_dointvec,
2651 },
2652 {
2653 .procname = "retrans_time",
2654 .maxlen = sizeof(int),
2655 .mode = 0644,
2656 .proc_handler = proc_dointvec_userhz_jiffies,
2657 },
2658 {
2659 .procname = "base_reachable_time",
2660 .maxlen = sizeof(int),
2661 .mode = 0644,
2662 .proc_handler = proc_dointvec_jiffies,
2663 },
2664 {
2665 .procname = "delay_first_probe_time",
2666 .maxlen = sizeof(int),
2667 .mode = 0644,
2668 .proc_handler = proc_dointvec_jiffies,
2669 },
2670 {
2671 .procname = "gc_stale_time",
2672 .maxlen = sizeof(int),
2673 .mode = 0644,
2674 .proc_handler = proc_dointvec_jiffies,
2675 },
2676 {
2677 .procname = "unres_qlen",
2678 .maxlen = sizeof(int),
2679 .mode = 0644,
2680 .proc_handler = proc_dointvec,
2681 },
2682 {
2683 .procname = "proxy_qlen",
2684 .maxlen = sizeof(int),
2685 .mode = 0644,
2686 .proc_handler = proc_dointvec,
2687 },
2688 {
2689 .procname = "anycast_delay",
2690 .maxlen = sizeof(int),
2691 .mode = 0644,
2692 .proc_handler = proc_dointvec_userhz_jiffies,
2693 },
2694 {
2695 .procname = "proxy_delay",
2696 .maxlen = sizeof(int),
2697 .mode = 0644,
2698 .proc_handler = proc_dointvec_userhz_jiffies,
2699 },
2700 {
2701 .procname = "locktime",
2702 .maxlen = sizeof(int),
2703 .mode = 0644,
2704 .proc_handler = proc_dointvec_userhz_jiffies,
2705 },
2706 {
2707 .procname = "retrans_time_ms",
2708 .maxlen = sizeof(int),
2709 .mode = 0644,
2710 .proc_handler = proc_dointvec_ms_jiffies,
2711 },
2712 {
2713 .procname = "base_reachable_time_ms",
2714 .maxlen = sizeof(int),
2715 .mode = 0644,
2716 .proc_handler = proc_dointvec_ms_jiffies,
2717 },
2718 {
2719 .procname = "gc_interval",
2720 .maxlen = sizeof(int),
2721 .mode = 0644,
2722 .proc_handler = proc_dointvec_jiffies,
2723 },
2724 {
2725 .procname = "gc_thresh1",
2726 .maxlen = sizeof(int),
2727 .mode = 0644,
2728 .proc_handler = proc_dointvec,
2729 },
2730 {
2731 .procname = "gc_thresh2",
2732 .maxlen = sizeof(int),
2733 .mode = 0644,
2734 .proc_handler = proc_dointvec,
2735 },
2736 {
2737 .procname = "gc_thresh3",
2738 .maxlen = sizeof(int),
2739 .mode = 0644,
2740 .proc_handler = proc_dointvec,
2741 },
2742 {},
2743 },
2744};
2745
2746int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2747 char *p_name, proc_handler *handler)
2748{
2749 struct neigh_sysctl_table *t;
2750 const char *dev_name_source = NULL;
2751
2752#define NEIGH_CTL_PATH_ROOT 0
2753#define NEIGH_CTL_PATH_PROTO 1
2754#define NEIGH_CTL_PATH_NEIGH 2
2755#define NEIGH_CTL_PATH_DEV 3
2756
2757 struct ctl_path neigh_path[] = {
2758 { .procname = "net", },
2759 { .procname = "proto", },
2760 { .procname = "neigh", },
2761 { .procname = "default", },
2762 { },
2763 };
2764
2765 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2766 if (!t)
2767 goto err;
2768
2769 t->neigh_vars[0].data = &p->mcast_probes;
2770 t->neigh_vars[1].data = &p->ucast_probes;
2771 t->neigh_vars[2].data = &p->app_probes;
2772 t->neigh_vars[3].data = &p->retrans_time;
2773 t->neigh_vars[4].data = &p->base_reachable_time;
2774 t->neigh_vars[5].data = &p->delay_probe_time;
2775 t->neigh_vars[6].data = &p->gc_staletime;
2776 t->neigh_vars[7].data = &p->queue_len;
2777 t->neigh_vars[8].data = &p->proxy_qlen;
2778 t->neigh_vars[9].data = &p->anycast_delay;
2779 t->neigh_vars[10].data = &p->proxy_delay;
2780 t->neigh_vars[11].data = &p->locktime;
2781 t->neigh_vars[12].data = &p->retrans_time;
2782 t->neigh_vars[13].data = &p->base_reachable_time;
2783
2784 if (dev) {
2785 dev_name_source = dev->name;
2786 /* Terminate the table early */
2787 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2788 } else {
2789 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2790 t->neigh_vars[14].data = (int *)(p + 1);
2791 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2792 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2793 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2794 }
2795
2796
2797 if (handler) {
2798 /* RetransTime */
2799 t->neigh_vars[3].proc_handler = handler;
2800 t->neigh_vars[3].extra1 = dev;
2801 /* ReachableTime */
2802 t->neigh_vars[4].proc_handler = handler;
2803 t->neigh_vars[4].extra1 = dev;
2804 /* RetransTime (in milliseconds) */
2805 t->neigh_vars[12].proc_handler = handler;
2806 t->neigh_vars[12].extra1 = dev;
2807 /* ReachableTime (in milliseconds) */
2808 t->neigh_vars[13].proc_handler = handler;
2809 t->neigh_vars[13].extra1 = dev;
2810 }
2811
2812 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2813 if (!t->dev_name)
2814 goto free;
2815
2816 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2817 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2818
2819 t->sysctl_header =
2820 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2821 if (!t->sysctl_header)
2822 goto free_procname;
2823
2824 p->sysctl_table = t;
2825 return 0;
2826
2827free_procname:
2828 kfree(t->dev_name);
2829free:
2830 kfree(t);
2831err:
2832 return -ENOBUFS;
2833}
2834EXPORT_SYMBOL(neigh_sysctl_register);
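
/*
 * Illustrative only: a minimal sketch of a caller. A protocol typically
 * registers its per-device parms once the device is known, passing a NULL
 * handler to keep the default proc handlers; the "example" names are
 * hypothetical.
 */
static int example_neigh_sysctl_register(struct net_device *dev,
					 struct neigh_parms *parms)
{
	return neigh_sysctl_register(dev, parms, "example", NULL);
}

/* On teardown, neigh_sysctl_unregister(parms) below releases the table. */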
2835
2836void neigh_sysctl_unregister(struct neigh_parms *p)
2837{
2838 if (p->sysctl_table) {
2839 struct neigh_sysctl_table *t = p->sysctl_table;
2840 p->sysctl_table = NULL;
2841 unregister_sysctl_table(t->sysctl_header);
2842 kfree(t->dev_name);
2843 kfree(t);
2844 }
2845}
2846EXPORT_SYMBOL(neigh_sysctl_unregister);
2847
2848#endif /* CONFIG_SYSCTL */
2849
2850static int __init neigh_init(void)
2851{
2852 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
2853 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
2854 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
2855
2856 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
2857 NULL);
2858 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
2859
2860 return 0;
2861}
2862
2863subsys_initcall(neigh_init);
2864
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/slab.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/socket.h>
25#include <linux/netdevice.h>
26#include <linux/proc_fs.h>
27#ifdef CONFIG_SYSCTL
28#include <linux/sysctl.h>
29#endif
30#include <linux/times.h>
31#include <net/net_namespace.h>
32#include <net/neighbour.h>
33#include <net/dst.h>
34#include <net/sock.h>
35#include <net/netevent.h>
36#include <net/netlink.h>
37#include <linux/rtnetlink.h>
38#include <linux/random.h>
39#include <linux/string.h>
40#include <linux/log2.h>
41#include <linux/inetdevice.h>
42#include <net/addrconf.h>
43
44#define DEBUG
45#define NEIGH_DEBUG 1
46#define neigh_dbg(level, fmt, ...) \
47do { \
48 if (level <= NEIGH_DEBUG) \
49 pr_debug(fmt, ##__VA_ARGS__); \
50} while (0)
51
52#define PNEIGH_HASHMASK 0xF
53
54static void neigh_timer_handler(unsigned long arg);
55static void __neigh_notify(struct neighbour *n, int type, int flags);
56static void neigh_update_notify(struct neighbour *neigh);
57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
58
59#ifdef CONFIG_PROC_FS
60static const struct file_operations neigh_stat_seq_fops;
61#endif
62
63/*
64 Neighbour hash table buckets are protected with rwlock tbl->lock.
65
66 - All the scans/updates to hash buckets MUST be made under this lock.
67 - NOTHING clever should be made under this lock: no callbacks
68 to protocol backends, no attempts to send something to network.
69 It will result in deadlocks, if backend/driver wants to use neighbour
70 cache.
71 - If the entry requires some non-trivial actions, increase
72 its reference count and release table lock.
73
74 Neighbour entries are protected:
75 - with reference count.
76 - with rwlock neigh->lock
77
78 Reference count prevents destruction.
79
80 neigh->lock mainly serializes ll address data and its validity state.
81 However, the same lock is used to protect another entry fields:
82 - timer
83 - resolution queue
84
85 Again, nothing clever shall be made under neigh->lock,
86 the most complicated procedure, which we allow is dev->hard_header.
87 It is supposed, that dev->hard_header is simplistic and does
88 not make callbacks to neighbour tables.
89 */
90
91static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
92{
93 kfree_skb(skb);
94 return -ENETDOWN;
95}
96
97static void neigh_cleanup_and_release(struct neighbour *neigh)
98{
99 if (neigh->parms->neigh_cleanup)
100 neigh->parms->neigh_cleanup(neigh);
101
102 __neigh_notify(neigh, RTM_DELNEIGH, 0);
103 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
104 neigh_release(neigh);
105}
106
107/*
108 * Returns a value randomly distributed in the interval (1/2)*base...(3/2)*base.
109 * This corresponds to the default IPv6 settings and is not overridable,
110 * because it is a really reasonable choice.
111 */
112
113unsigned long neigh_rand_reach_time(unsigned long base)
114{
115 return base ? (prandom_u32() % base) + (base >> 1) : 0;
116}
117EXPORT_SYMBOL(neigh_rand_reach_time);
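
/*
 * Worked example (illustrative): with a base_reachable_time of 30 * HZ
 * (the usual default), each call returns a value in [15 * HZ, 45 * HZ),
 * so every parms block gets its own reachable_time scattered around the
 * configured base rather than all entries expiring in lock-step.
 */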
118
119
120static int neigh_forced_gc(struct neigh_table *tbl)
121{
122 int shrunk = 0;
123 int i;
124 struct neigh_hash_table *nht;
125
126 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
127
128 write_lock_bh(&tbl->lock);
129 nht = rcu_dereference_protected(tbl->nht,
130 lockdep_is_held(&tbl->lock));
131 for (i = 0; i < (1 << nht->hash_shift); i++) {
132 struct neighbour *n;
133 struct neighbour __rcu **np;
134
135 np = &nht->hash_buckets[i];
136 while ((n = rcu_dereference_protected(*np,
137 lockdep_is_held(&tbl->lock))) != NULL) {
138 /* Neighbour record may be discarded if:
139 * - nobody refers to it.
140 * - it is not permanent
141 */
142 write_lock(&n->lock);
143 if (atomic_read(&n->refcnt) == 1 &&
144 !(n->nud_state & NUD_PERMANENT)) {
145 rcu_assign_pointer(*np,
146 rcu_dereference_protected(n->next,
147 lockdep_is_held(&tbl->lock)));
148 n->dead = 1;
149 shrunk = 1;
150 write_unlock(&n->lock);
151 neigh_cleanup_and_release(n);
152 continue;
153 }
154 write_unlock(&n->lock);
155 np = &n->next;
156 }
157 }
158
159 tbl->last_flush = jiffies;
160
161 write_unlock_bh(&tbl->lock);
162
163 return shrunk;
164}
165
166static void neigh_add_timer(struct neighbour *n, unsigned long when)
167{
168 neigh_hold(n);
169 if (unlikely(mod_timer(&n->timer, when))) {
170 printk("NEIGH: BUG, double timer add, state is %x\n",
171 n->nud_state);
172 dump_stack();
173 }
174}
175
176static int neigh_del_timer(struct neighbour *n)
177{
178 if ((n->nud_state & NUD_IN_TIMER) &&
179 del_timer(&n->timer)) {
180 neigh_release(n);
181 return 1;
182 }
183 return 0;
184}
185
186static void pneigh_queue_purge(struct sk_buff_head *list)
187{
188 struct sk_buff *skb;
189
190 while ((skb = skb_dequeue(list)) != NULL) {
191 dev_put(skb->dev);
192 kfree_skb(skb);
193 }
194}
195
196static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
197{
198 int i;
199 struct neigh_hash_table *nht;
200
201 nht = rcu_dereference_protected(tbl->nht,
202 lockdep_is_held(&tbl->lock));
203
204 for (i = 0; i < (1 << nht->hash_shift); i++) {
205 struct neighbour *n;
206 struct neighbour __rcu **np = &nht->hash_buckets[i];
207
208 while ((n = rcu_dereference_protected(*np,
209 lockdep_is_held(&tbl->lock))) != NULL) {
210 if (dev && n->dev != dev) {
211 np = &n->next;
212 continue;
213 }
214 rcu_assign_pointer(*np,
215 rcu_dereference_protected(n->next,
216 lockdep_is_held(&tbl->lock)));
217 write_lock(&n->lock);
218 neigh_del_timer(n);
219 n->dead = 1;
220
221 if (atomic_read(&n->refcnt) != 1) {
222 /* The most unpleasant situation:
223 we must destroy the neighbour entry,
224 but someone still holds a reference to it.
225
226 Destruction will be delayed until the
227 last user releases it, but we must
228 still kill timers etc. and move the
229 entry to a safe state.
230 */
231 __skb_queue_purge(&n->arp_queue);
232 n->arp_queue_len_bytes = 0;
233 n->output = neigh_blackhole;
234 if (n->nud_state & NUD_VALID)
235 n->nud_state = NUD_NOARP;
236 else
237 n->nud_state = NUD_NONE;
238 neigh_dbg(2, "neigh %p is stray\n", n);
239 }
240 write_unlock(&n->lock);
241 neigh_cleanup_and_release(n);
242 }
243 }
244}
245
246void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
247{
248 write_lock_bh(&tbl->lock);
249 neigh_flush_dev(tbl, dev);
250 write_unlock_bh(&tbl->lock);
251}
252EXPORT_SYMBOL(neigh_changeaddr);
253
254int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
255{
256 write_lock_bh(&tbl->lock);
257 neigh_flush_dev(tbl, dev);
258 pneigh_ifdown(tbl, dev);
259 write_unlock_bh(&tbl->lock);
260
261 del_timer_sync(&tbl->proxy_timer);
262 pneigh_queue_purge(&tbl->proxy_queue);
263 return 0;
264}
265EXPORT_SYMBOL(neigh_ifdown);
266
267static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
268{
269 struct neighbour *n = NULL;
270 unsigned long now = jiffies;
271 int entries;
272
273 entries = atomic_inc_return(&tbl->entries) - 1;
274 if (entries >= tbl->gc_thresh3 ||
275 (entries >= tbl->gc_thresh2 &&
276 time_after(now, tbl->last_flush + 5 * HZ))) {
277 if (!neigh_forced_gc(tbl) &&
278 entries >= tbl->gc_thresh3) {
279 net_info_ratelimited("%s: neighbor table overflow!\n",
280 tbl->id);
281 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
282 goto out_entries;
283 }
284 }
285
286 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
287 if (!n)
288 goto out_entries;
289
290 __skb_queue_head_init(&n->arp_queue);
291 rwlock_init(&n->lock);
292 seqlock_init(&n->ha_lock);
293 n->updated = n->used = now;
294 n->nud_state = NUD_NONE;
295 n->output = neigh_blackhole;
296 seqlock_init(&n->hh.hh_lock);
297 n->parms = neigh_parms_clone(&tbl->parms);
298 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
299
300 NEIGH_CACHE_STAT_INC(tbl, allocs);
301 n->tbl = tbl;
302 atomic_set(&n->refcnt, 1);
303 n->dead = 1;
304out:
305 return n;
306
307out_entries:
308 atomic_dec(&tbl->entries);
309 goto out;
310}
311
312static void neigh_get_hash_rnd(u32 *x)
313{
314 get_random_bytes(x, sizeof(*x));
315 *x |= 1;
316}
317
318static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
319{
320 size_t size = (1 << shift) * sizeof(struct neighbour *);
321 struct neigh_hash_table *ret;
322 struct neighbour __rcu **buckets;
323 int i;
324
325 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
326 if (!ret)
327 return NULL;
328 if (size <= PAGE_SIZE)
329 buckets = kzalloc(size, GFP_ATOMIC);
330 else
331 buckets = (struct neighbour __rcu **)
332 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
333 get_order(size));
334 if (!buckets) {
335 kfree(ret);
336 return NULL;
337 }
338 ret->hash_buckets = buckets;
339 ret->hash_shift = shift;
340 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
341 neigh_get_hash_rnd(&ret->hash_rnd[i]);
342 return ret;
343}
344
345static void neigh_hash_free_rcu(struct rcu_head *head)
346{
347 struct neigh_hash_table *nht = container_of(head,
348 struct neigh_hash_table,
349 rcu);
350 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
351 struct neighbour __rcu **buckets = nht->hash_buckets;
352
353 if (size <= PAGE_SIZE)
354 kfree(buckets);
355 else
356 free_pages((unsigned long)buckets, get_order(size));
357 kfree(nht);
358}
359
360static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
361 unsigned long new_shift)
362{
363 unsigned int i, hash;
364 struct neigh_hash_table *new_nht, *old_nht;
365
366 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
367
368 old_nht = rcu_dereference_protected(tbl->nht,
369 lockdep_is_held(&tbl->lock));
370 new_nht = neigh_hash_alloc(new_shift);
371 if (!new_nht)
372 return old_nht;
373
374 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
375 struct neighbour *n, *next;
376
377 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
378 lockdep_is_held(&tbl->lock));
379 n != NULL;
380 n = next) {
381 hash = tbl->hash(n->primary_key, n->dev,
382 new_nht->hash_rnd);
383
384 hash >>= (32 - new_nht->hash_shift);
385 next = rcu_dereference_protected(n->next,
386 lockdep_is_held(&tbl->lock));
387
388 rcu_assign_pointer(n->next,
389 rcu_dereference_protected(
390 new_nht->hash_buckets[hash],
391 lockdep_is_held(&tbl->lock)));
392 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
393 }
394 }
395
396 rcu_assign_pointer(tbl->nht, new_nht);
397 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
398 return new_nht;
399}
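
/*
 * Illustrative note on the bucket computation above: hash() returns a
 * full 32-bit value and "hash >>= (32 - hash_shift)" keeps only its top
 * hash_shift bits, so e.g. a table grown to hash_shift == 3 selects one
 * of 8 buckets from the three most significant bits of the hash.
 */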
400
401struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
402 struct net_device *dev)
403{
404 struct neighbour *n;
405
406 NEIGH_CACHE_STAT_INC(tbl, lookups);
407
408 rcu_read_lock_bh();
409 n = __neigh_lookup_noref(tbl, pkey, dev);
410 if (n) {
411 if (!atomic_inc_not_zero(&n->refcnt))
412 n = NULL;
413 NEIGH_CACHE_STAT_INC(tbl, hits);
414 }
415
416 rcu_read_unlock_bh();
417 return n;
418}
419EXPORT_SYMBOL(neigh_lookup);
420
421struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
422 const void *pkey)
423{
424 struct neighbour *n;
425 int key_len = tbl->key_len;
426 u32 hash_val;
427 struct neigh_hash_table *nht;
428
429 NEIGH_CACHE_STAT_INC(tbl, lookups);
430
431 rcu_read_lock_bh();
432 nht = rcu_dereference_bh(tbl->nht);
433 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
434
435 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
436 n != NULL;
437 n = rcu_dereference_bh(n->next)) {
438 if (!memcmp(n->primary_key, pkey, key_len) &&
439 net_eq(dev_net(n->dev), net)) {
440 if (!atomic_inc_not_zero(&n->refcnt))
441 n = NULL;
442 NEIGH_CACHE_STAT_INC(tbl, hits);
443 break;
444 }
445 }
446
447 rcu_read_unlock_bh();
448 return n;
449}
450EXPORT_SYMBOL(neigh_lookup_nodev);
451
452struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
453 struct net_device *dev, bool want_ref)
454{
455 u32 hash_val;
456 int key_len = tbl->key_len;
457 int error;
458 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
459 struct neigh_hash_table *nht;
460
461 if (!n) {
462 rc = ERR_PTR(-ENOBUFS);
463 goto out;
464 }
465
466 memcpy(n->primary_key, pkey, key_len);
467 n->dev = dev;
468 dev_hold(dev);
469
470 /* Protocol specific setup. */
471 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
472 rc = ERR_PTR(error);
473 goto out_neigh_release;
474 }
475
476 if (dev->netdev_ops->ndo_neigh_construct) {
477 error = dev->netdev_ops->ndo_neigh_construct(dev, n);
478 if (error < 0) {
479 rc = ERR_PTR(error);
480 goto out_neigh_release;
481 }
482 }
483
484 /* Device specific setup. */
485 if (n->parms->neigh_setup &&
486 (error = n->parms->neigh_setup(n)) < 0) {
487 rc = ERR_PTR(error);
488 goto out_neigh_release;
489 }
490
491 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
492
493 write_lock_bh(&tbl->lock);
494 nht = rcu_dereference_protected(tbl->nht,
495 lockdep_is_held(&tbl->lock));
496
497 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
498 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
499
500 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
501
502 if (n->parms->dead) {
503 rc = ERR_PTR(-EINVAL);
504 goto out_tbl_unlock;
505 }
506
507 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
508 lockdep_is_held(&tbl->lock));
509 n1 != NULL;
510 n1 = rcu_dereference_protected(n1->next,
511 lockdep_is_held(&tbl->lock))) {
512 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
513 if (want_ref)
514 neigh_hold(n1);
515 rc = n1;
516 goto out_tbl_unlock;
517 }
518 }
519
520 n->dead = 0;
521 if (want_ref)
522 neigh_hold(n);
523 rcu_assign_pointer(n->next,
524 rcu_dereference_protected(nht->hash_buckets[hash_val],
525 lockdep_is_held(&tbl->lock)));
526 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
527 write_unlock_bh(&tbl->lock);
528 neigh_dbg(2, "neigh %p is created\n", n);
529 rc = n;
530out:
531 return rc;
532out_tbl_unlock:
533 write_unlock_bh(&tbl->lock);
534out_neigh_release:
535 neigh_release(n);
536 goto out;
537}
538EXPORT_SYMBOL(__neigh_create);
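
/*
 * Illustrative only: the usual lookup-or-create pattern built on the two
 * functions above (table, key and device are hypothetical here):
 *
 *	struct neighbour *n = neigh_lookup(tbl, &addr, dev);
 *
 *	if (!n) {
 *		n = __neigh_create(tbl, &addr, dev, true);
 *		if (IS_ERR(n))
 *			return PTR_ERR(n);
 *	}
 *	...
 *	neigh_release(n);
 *
 * which is essentially what the __neigh_lookup*() helpers in
 * <net/neighbour.h> expand to.
 */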
539
540static u32 pneigh_hash(const void *pkey, int key_len)
541{
542 u32 hash_val = *(u32 *)(pkey + key_len - 4);
543 hash_val ^= (hash_val >> 16);
544 hash_val ^= hash_val >> 8;
545 hash_val ^= hash_val >> 4;
546 hash_val &= PNEIGH_HASHMASK;
547 return hash_val;
548}
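
/*
 * Equivalent view of the fold above (illustrative): the shifts by 16, 8
 * and 4 followed by the PNEIGH_HASHMASK mask leave exactly the XOR of all
 * eight 4-bit nibbles of the last four key bytes, so proxy entries are
 * spread over the 16 buckets by that nibble XOR of the address tail.
 */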
549
550static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
551 struct net *net,
552 const void *pkey,
553 int key_len,
554 struct net_device *dev)
555{
556 while (n) {
557 if (!memcmp(n->key, pkey, key_len) &&
558 net_eq(pneigh_net(n), net) &&
559 (n->dev == dev || !n->dev))
560 return n;
561 n = n->next;
562 }
563 return NULL;
564}
565
566struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
567 struct net *net, const void *pkey, struct net_device *dev)
568{
569 int key_len = tbl->key_len;
570 u32 hash_val = pneigh_hash(pkey, key_len);
571
572 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
573 net, pkey, key_len, dev);
574}
575EXPORT_SYMBOL_GPL(__pneigh_lookup);
576
577struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
578 struct net *net, const void *pkey,
579 struct net_device *dev, int creat)
580{
581 struct pneigh_entry *n;
582 int key_len = tbl->key_len;
583 u32 hash_val = pneigh_hash(pkey, key_len);
584
585 read_lock_bh(&tbl->lock);
586 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
587 net, pkey, key_len, dev);
588 read_unlock_bh(&tbl->lock);
589
590 if (n || !creat)
591 goto out;
592
593 ASSERT_RTNL();
594
595 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
596 if (!n)
597 goto out;
598
599 write_pnet(&n->net, net);
600 memcpy(n->key, pkey, key_len);
601 n->dev = dev;
602 if (dev)
603 dev_hold(dev);
604
605 if (tbl->pconstructor && tbl->pconstructor(n)) {
606 if (dev)
607 dev_put(dev);
608 kfree(n);
609 n = NULL;
610 goto out;
611 }
612
613 write_lock_bh(&tbl->lock);
614 n->next = tbl->phash_buckets[hash_val];
615 tbl->phash_buckets[hash_val] = n;
616 write_unlock_bh(&tbl->lock);
617out:
618 return n;
619}
620EXPORT_SYMBOL(pneigh_lookup);
621
622
623int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
624 struct net_device *dev)
625{
626 struct pneigh_entry *n, **np;
627 int key_len = tbl->key_len;
628 u32 hash_val = pneigh_hash(pkey, key_len);
629
630 write_lock_bh(&tbl->lock);
631 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
632 np = &n->next) {
633 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
634 net_eq(pneigh_net(n), net)) {
635 *np = n->next;
636 write_unlock_bh(&tbl->lock);
637 if (tbl->pdestructor)
638 tbl->pdestructor(n);
639 if (n->dev)
640 dev_put(n->dev);
641 kfree(n);
642 return 0;
643 }
644 }
645 write_unlock_bh(&tbl->lock);
646 return -ENOENT;
647}
648
649static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
650{
651 struct pneigh_entry *n, **np;
652 u32 h;
653
654 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
655 np = &tbl->phash_buckets[h];
656 while ((n = *np) != NULL) {
657 if (!dev || n->dev == dev) {
658 *np = n->next;
659 if (tbl->pdestructor)
660 tbl->pdestructor(n);
661 if (n->dev)
662 dev_put(n->dev);
663 kfree(n);
664 continue;
665 }
666 np = &n->next;
667 }
668 }
669 return -ENOENT;
670}
671
672static void neigh_parms_destroy(struct neigh_parms *parms);
673
674static inline void neigh_parms_put(struct neigh_parms *parms)
675{
676 if (atomic_dec_and_test(&parms->refcnt))
677 neigh_parms_destroy(parms);
678}
679
680/*
681 * The neighbour must already have been removed from the table
682 * before neigh_destroy() is called.
683 */
684void neigh_destroy(struct neighbour *neigh)
685{
686 struct net_device *dev = neigh->dev;
687
688 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
689
690 if (!neigh->dead) {
691 pr_warn("Destroying alive neighbour %p\n", neigh);
692 dump_stack();
693 return;
694 }
695
696 if (neigh_del_timer(neigh))
697 pr_warn("Impossible event\n");
698
699 write_lock_bh(&neigh->lock);
700 __skb_queue_purge(&neigh->arp_queue);
701 write_unlock_bh(&neigh->lock);
702 neigh->arp_queue_len_bytes = 0;
703
704 if (dev->netdev_ops->ndo_neigh_destroy)
705 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
706
707 dev_put(dev);
708 neigh_parms_put(neigh->parms);
709
710 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
711
712 atomic_dec(&neigh->tbl->entries);
713 kfree_rcu(neigh, rcu);
714}
715EXPORT_SYMBOL(neigh_destroy);
716
717/* Neighbour state is suspicious;
718 disable fast path.
719
720 Called with write_locked neigh.
721 */
722static void neigh_suspect(struct neighbour *neigh)
723{
724 neigh_dbg(2, "neigh %p is suspected\n", neigh);
725
726 neigh->output = neigh->ops->output;
727}
728
729/* Neighbour state is OK;
730 enable fast path.
731
732 Called with write_locked neigh.
733 */
734static void neigh_connect(struct neighbour *neigh)
735{
736 neigh_dbg(2, "neigh %p is connected\n", neigh);
737
738 neigh->output = neigh->ops->connected_output;
739}
740
741static void neigh_periodic_work(struct work_struct *work)
742{
743 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
744 struct neighbour *n;
745 struct neighbour __rcu **np;
746 unsigned int i;
747 struct neigh_hash_table *nht;
748
749 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
750
751 write_lock_bh(&tbl->lock);
752 nht = rcu_dereference_protected(tbl->nht,
753 lockdep_is_held(&tbl->lock));
754
755 /*
756 * Periodically recompute ReachableTime from the random function.
757 */
758
759 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
760 struct neigh_parms *p;
761 tbl->last_rand = jiffies;
762 list_for_each_entry(p, &tbl->parms_list, list)
763 p->reachable_time =
764 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
765 }
766
767 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
768 goto out;
769
770 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
771 np = &nht->hash_buckets[i];
772
773 while ((n = rcu_dereference_protected(*np,
774 lockdep_is_held(&tbl->lock))) != NULL) {
775 unsigned int state;
776
777 write_lock(&n->lock);
778
779 state = n->nud_state;
780 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
781 write_unlock(&n->lock);
782 goto next_elt;
783 }
784
785 if (time_before(n->used, n->confirmed))
786 n->used = n->confirmed;
787
788 if (atomic_read(&n->refcnt) == 1 &&
789 (state == NUD_FAILED ||
790 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
791 *np = n->next;
792 n->dead = 1;
793 write_unlock(&n->lock);
794 neigh_cleanup_and_release(n);
795 continue;
796 }
797 write_unlock(&n->lock);
798
799next_elt:
800 np = &n->next;
801 }
802 /*
803 * It's fine to release lock here, even if hash table
804 * grows while we are preempted.
805 */
806 write_unlock_bh(&tbl->lock);
807 cond_resched();
808 write_lock_bh(&tbl->lock);
809 nht = rcu_dereference_protected(tbl->nht,
810 lockdep_is_held(&tbl->lock));
811 }
812out:
813 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
814 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
815 * BASE_REACHABLE_TIME.
816 */
817 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
818 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
819 write_unlock_bh(&tbl->lock);
820}
821
822static __inline__ int neigh_max_probes(struct neighbour *n)
823{
824 struct neigh_parms *p = n->parms;
825 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
826 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
827 NEIGH_VAR(p, MCAST_PROBES));
828}
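
/*
 * Worked example (illustrative): with the common defaults of 3 multicast,
 * 3 unicast and 0 application probes this limit is 6. Because
 * __neigh_event_send() below pre-loads the probe counter with
 * UCAST_PROBES, a new NUD_INCOMPLETE entry effectively gets 3 multicast
 * solicitations before the timer marks it NUD_FAILED, while NUD_PROBE
 * reconfirmation starts the counter at 0 and uses MCAST_REPROBES in place
 * of MCAST_PROBES.
 */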
829
830static void neigh_invalidate(struct neighbour *neigh)
831 __releases(neigh->lock)
832 __acquires(neigh->lock)
833{
834 struct sk_buff *skb;
835
836 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
837 neigh_dbg(2, "neigh %p is failed\n", neigh);
838 neigh->updated = jiffies;
839
840 /* This is a very delicate place: the error_report routine is quite
841 complicated and, in particular, it can hit this same neighbour entry!
842
843 So we try to be careful and avoid a dead loop. --ANK
844 */
845 while (neigh->nud_state == NUD_FAILED &&
846 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
847 write_unlock(&neigh->lock);
848 neigh->ops->error_report(neigh, skb);
849 write_lock(&neigh->lock);
850 }
851 __skb_queue_purge(&neigh->arp_queue);
852 neigh->arp_queue_len_bytes = 0;
853}
854
855static void neigh_probe(struct neighbour *neigh)
856 __releases(neigh->lock)
857{
858 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
859 /* keep skb alive even if arp_queue overflows */
860 if (skb)
861 skb = skb_clone(skb, GFP_ATOMIC);
862 write_unlock(&neigh->lock);
863 neigh->ops->solicit(neigh, skb);
864 atomic_inc(&neigh->probes);
865 kfree_skb(skb);
866}
867
868/* Called when a timer expires for a neighbour entry. */
869
870static void neigh_timer_handler(unsigned long arg)
871{
872 unsigned long now, next;
873 struct neighbour *neigh = (struct neighbour *)arg;
874 unsigned int state;
875 int notify = 0;
876
877 write_lock(&neigh->lock);
878
879 state = neigh->nud_state;
880 now = jiffies;
881 next = now + HZ;
882
883 if (!(state & NUD_IN_TIMER))
884 goto out;
885
886 if (state & NUD_REACHABLE) {
887 if (time_before_eq(now,
888 neigh->confirmed + neigh->parms->reachable_time)) {
889 neigh_dbg(2, "neigh %p is still alive\n", neigh);
890 next = neigh->confirmed + neigh->parms->reachable_time;
891 } else if (time_before_eq(now,
892 neigh->used +
893 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
894 neigh_dbg(2, "neigh %p is delayed\n", neigh);
895 neigh->nud_state = NUD_DELAY;
896 neigh->updated = jiffies;
897 neigh_suspect(neigh);
898 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
899 } else {
900 neigh_dbg(2, "neigh %p is suspected\n", neigh);
901 neigh->nud_state = NUD_STALE;
902 neigh->updated = jiffies;
903 neigh_suspect(neigh);
904 notify = 1;
905 }
906 } else if (state & NUD_DELAY) {
907 if (time_before_eq(now,
908 neigh->confirmed +
909 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
910 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
911 neigh->nud_state = NUD_REACHABLE;
912 neigh->updated = jiffies;
913 neigh_connect(neigh);
914 notify = 1;
915 next = neigh->confirmed + neigh->parms->reachable_time;
916 } else {
917 neigh_dbg(2, "neigh %p is probed\n", neigh);
918 neigh->nud_state = NUD_PROBE;
919 neigh->updated = jiffies;
920 atomic_set(&neigh->probes, 0);
921 notify = 1;
922 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
923 }
924 } else {
925 /* NUD_PROBE|NUD_INCOMPLETE */
926 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
927 }
928
929 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
930 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
931 neigh->nud_state = NUD_FAILED;
932 notify = 1;
933 neigh_invalidate(neigh);
934 goto out;
935 }
936
937 if (neigh->nud_state & NUD_IN_TIMER) {
938 if (time_before(next, jiffies + HZ/2))
939 next = jiffies + HZ/2;
940 if (!mod_timer(&neigh->timer, next))
941 neigh_hold(neigh);
942 }
943 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
944 neigh_probe(neigh);
945 } else {
946out:
947 write_unlock(&neigh->lock);
948 }
949
950 if (notify)
951 neigh_update_notify(neigh);
952
953 neigh_release(neigh);
954}
955
956int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
957{
958 int rc;
959 bool immediate_probe = false;
960
961 write_lock_bh(&neigh->lock);
962
963 rc = 0;
964 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
965 goto out_unlock_bh;
966 if (neigh->dead)
967 goto out_dead;
968
969 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
970 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
971 NEIGH_VAR(neigh->parms, APP_PROBES)) {
972 unsigned long next, now = jiffies;
973
974 atomic_set(&neigh->probes,
975 NEIGH_VAR(neigh->parms, UCAST_PROBES));
976 neigh->nud_state = NUD_INCOMPLETE;
977 neigh->updated = now;
978 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
979 HZ/2);
980 neigh_add_timer(neigh, next);
981 immediate_probe = true;
982 } else {
983 neigh->nud_state = NUD_FAILED;
984 neigh->updated = jiffies;
985 write_unlock_bh(&neigh->lock);
986
987 kfree_skb(skb);
988 return 1;
989 }
990 } else if (neigh->nud_state & NUD_STALE) {
991 neigh_dbg(2, "neigh %p is delayed\n", neigh);
992 neigh->nud_state = NUD_DELAY;
993 neigh->updated = jiffies;
994 neigh_add_timer(neigh, jiffies +
995 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
996 }
997
998 if (neigh->nud_state == NUD_INCOMPLETE) {
999 if (skb) {
1000 while (neigh->arp_queue_len_bytes + skb->truesize >
1001 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1002 struct sk_buff *buff;
1003
1004 buff = __skb_dequeue(&neigh->arp_queue);
1005 if (!buff)
1006 break;
1007 neigh->arp_queue_len_bytes -= buff->truesize;
1008 kfree_skb(buff);
1009 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1010 }
1011 skb_dst_force(skb);
1012 __skb_queue_tail(&neigh->arp_queue, skb);
1013 neigh->arp_queue_len_bytes += skb->truesize;
1014 }
1015 rc = 1;
1016 }
1017out_unlock_bh:
1018 if (immediate_probe)
1019 neigh_probe(neigh);
1020 else
1021 write_unlock(&neigh->lock);
1022 local_bh_enable();
1023 return rc;
1024
1025out_dead:
1026 if (neigh->nud_state & NUD_STALE)
1027 goto out_unlock_bh;
1028 write_unlock_bh(&neigh->lock);
1029 kfree_skb(skb);
1030 return 1;
1031}
1032EXPORT_SYMBOL(__neigh_event_send);
1033
1034static void neigh_update_hhs(struct neighbour *neigh)
1035{
1036 struct hh_cache *hh;
1037 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1038 = NULL;
1039
1040 if (neigh->dev->header_ops)
1041 update = neigh->dev->header_ops->cache_update;
1042
1043 if (update) {
1044 hh = &neigh->hh;
1045 if (hh->hh_len) {
1046 write_seqlock_bh(&hh->hh_lock);
1047 update(hh, neigh->dev, neigh->ha);
1048 write_sequnlock_bh(&hh->hh_lock);
1049 }
1050 }
1051}
1052
1053
1054
1055/* Generic update routine.
1056 -- lladdr is the new lladdr, or NULL if it is not supplied.
1057 -- new is the new state.
1058 -- flags
1059 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1060 if it is different.
1061 NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
1062 lladdr as suspect instead of overriding it,
1063 if it is different.
1064 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1065
1066 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1067 NTF_ROUTER flag.
1068 NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to be
1069 a router.
1070
1071 The caller MUST hold a reference count on the entry.
1072 */
1073
1074int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1075 u32 flags)
1076{
1077 u8 old;
1078 int err;
1079 int notify = 0;
1080 struct net_device *dev;
1081 int update_isrouter = 0;
1082
1083 write_lock_bh(&neigh->lock);
1084
1085 dev = neigh->dev;
1086 old = neigh->nud_state;
1087 err = -EPERM;
1088
1089 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1090 (old & (NUD_NOARP | NUD_PERMANENT)))
1091 goto out;
1092 if (neigh->dead)
1093 goto out;
1094
1095 if (!(new & NUD_VALID)) {
1096 neigh_del_timer(neigh);
1097 if (old & NUD_CONNECTED)
1098 neigh_suspect(neigh);
1099 neigh->nud_state = new;
1100 err = 0;
1101 notify = old & NUD_VALID;
1102 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1103 (new & NUD_FAILED)) {
1104 neigh_invalidate(neigh);
1105 notify = 1;
1106 }
1107 goto out;
1108 }
1109
1110 /* Compare new lladdr with cached one */
1111 if (!dev->addr_len) {
1112 /* First case: device needs no address. */
1113 lladdr = neigh->ha;
1114 } else if (lladdr) {
1115 /* The second case: if something is already cached
1116 and a new address is proposed:
1117 - compare new & old
1118 - if they are different, check override flag
1119 */
1120 if ((old & NUD_VALID) &&
1121 !memcmp(lladdr, neigh->ha, dev->addr_len))
1122 lladdr = neigh->ha;
1123 } else {
1124 /* No address is supplied; if we know something,
1125 use it, otherwise discard the request.
1126 */
1127 err = -EINVAL;
1128 if (!(old & NUD_VALID))
1129 goto out;
1130 lladdr = neigh->ha;
1131 }
1132
1133 if (new & NUD_CONNECTED)
1134 neigh->confirmed = jiffies;
1135 neigh->updated = jiffies;
1136
1137 /* If the entry was valid and the address has not changed,
1138 do not change the entry state if the new one is STALE.
1139 */
1140 err = 0;
1141 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1142 if (old & NUD_VALID) {
1143 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1144 update_isrouter = 0;
1145 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1146 (old & NUD_CONNECTED)) {
1147 lladdr = neigh->ha;
1148 new = NUD_STALE;
1149 } else
1150 goto out;
1151 } else {
1152 if (lladdr == neigh->ha && new == NUD_STALE &&
1153 !(flags & NEIGH_UPDATE_F_ADMIN))
1154 new = old;
1155 }
1156 }
1157
1158 if (new != old) {
1159 neigh_del_timer(neigh);
1160 if (new & NUD_PROBE)
1161 atomic_set(&neigh->probes, 0);
1162 if (new & NUD_IN_TIMER)
1163 neigh_add_timer(neigh, (jiffies +
1164 ((new & NUD_REACHABLE) ?
1165 neigh->parms->reachable_time :
1166 0)));
1167 neigh->nud_state = new;
1168 notify = 1;
1169 }
1170
1171 if (lladdr != neigh->ha) {
1172 write_seqlock(&neigh->ha_lock);
1173 memcpy(&neigh->ha, lladdr, dev->addr_len);
1174 write_sequnlock(&neigh->ha_lock);
1175 neigh_update_hhs(neigh);
1176 if (!(new & NUD_CONNECTED))
1177 neigh->confirmed = jiffies -
1178 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1179 notify = 1;
1180 }
1181 if (new == old)
1182 goto out;
1183 if (new & NUD_CONNECTED)
1184 neigh_connect(neigh);
1185 else
1186 neigh_suspect(neigh);
1187 if (!(old & NUD_VALID)) {
1188 struct sk_buff *skb;
1189
1190 /* Again: avoid dead loop if something went wrong */
1191
1192 while (neigh->nud_state & NUD_VALID &&
1193 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1194 struct dst_entry *dst = skb_dst(skb);
1195 struct neighbour *n2, *n1 = neigh;
1196 write_unlock_bh(&neigh->lock);
1197
1198 rcu_read_lock();
1199
1200 /* Why not just use 'neigh' as-is? The problem is that
1201 * things such as shaper, eql, and sch_teql can end up
1202 * using alternative, different, neigh objects to output
1203 * the packet in the output path. So what we need to do
1204 * here is re-lookup the top-level neigh in the path so
1205 * we can reinject the packet there.
1206 */
1207 n2 = NULL;
1208 if (dst) {
1209 n2 = dst_neigh_lookup_skb(dst, skb);
1210 if (n2)
1211 n1 = n2;
1212 }
1213 n1->output(n1, skb);
1214 if (n2)
1215 neigh_release(n2);
1216 rcu_read_unlock();
1217
1218 write_lock_bh(&neigh->lock);
1219 }
1220 __skb_queue_purge(&neigh->arp_queue);
1221 neigh->arp_queue_len_bytes = 0;
1222 }
1223out:
1224 if (update_isrouter) {
1225 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1226 (neigh->flags | NTF_ROUTER) :
1227 (neigh->flags & ~NTF_ROUTER);
1228 }
1229 write_unlock_bh(&neigh->lock);
1230
1231 if (notify)
1232 neigh_update_notify(neigh);
1233
1234 return err;
1235}
1236EXPORT_SYMBOL(neigh_update);
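
/*
 * Illustrative only: typical calls into neigh_update(). A protocol that
 * receives a solicited, authoritative reply carrying a link-layer address
 * would do something like (the variables are hypothetical):
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
 *
 * whereas an unsolicited advertisement is normally demoted to NUD_STALE,
 * as neigh_event_ns() below does with NEIGH_UPDATE_F_OVERRIDE alone.
 */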
1237
1238/* Update the neigh to listen temporarily for probe responses, even if it is
1239 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1240 */
1241void __neigh_set_probe_once(struct neighbour *neigh)
1242{
1243 if (neigh->dead)
1244 return;
1245 neigh->updated = jiffies;
1246 if (!(neigh->nud_state & NUD_FAILED))
1247 return;
1248 neigh->nud_state = NUD_INCOMPLETE;
1249 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1250 neigh_add_timer(neigh,
1251 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1252}
1253EXPORT_SYMBOL(__neigh_set_probe_once);
1254
1255struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1256 u8 *lladdr, void *saddr,
1257 struct net_device *dev)
1258{
1259 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1260 lladdr || !dev->addr_len);
1261 if (neigh)
1262 neigh_update(neigh, lladdr, NUD_STALE,
1263 NEIGH_UPDATE_F_OVERRIDE);
1264 return neigh;
1265}
1266EXPORT_SYMBOL(neigh_event_ns);
1267
1268/* called with read_lock_bh(&n->lock); */
1269static void neigh_hh_init(struct neighbour *n)
1270{
1271 struct net_device *dev = n->dev;
1272 __be16 prot = n->tbl->protocol;
1273 struct hh_cache *hh = &n->hh;
1274
1275 write_lock_bh(&n->lock);
1276
1277 /* Only one thread can come in here and initialize the
1278 * hh_cache entry.
1279 */
1280 if (!hh->hh_len)
1281 dev->header_ops->cache(n, hh, prot);
1282
1283 write_unlock_bh(&n->lock);
1284}
1285
1286/* Slow and careful. */
1287
1288int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1289{
1290 int rc = 0;
1291
1292 if (!neigh_event_send(neigh, skb)) {
1293 int err;
1294 struct net_device *dev = neigh->dev;
1295 unsigned int seq;
1296
1297 if (dev->header_ops->cache && !neigh->hh.hh_len)
1298 neigh_hh_init(neigh);
1299
1300 do {
1301 __skb_pull(skb, skb_network_offset(skb));
1302 seq = read_seqbegin(&neigh->ha_lock);
1303 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1304 neigh->ha, NULL, skb->len);
1305 } while (read_seqretry(&neigh->ha_lock, seq));
1306
1307 if (err >= 0)
1308 rc = dev_queue_xmit(skb);
1309 else
1310 goto out_kfree_skb;
1311 }
1312out:
1313 return rc;
1314out_kfree_skb:
1315 rc = -EINVAL;
1316 kfree_skb(skb);
1317 goto out;
1318}
1319EXPORT_SYMBOL(neigh_resolve_output);
1320
1321/* As fast as possible without hh cache */
1322
1323int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1324{
1325 struct net_device *dev = neigh->dev;
1326 unsigned int seq;
1327 int err;
1328
1329 do {
1330 __skb_pull(skb, skb_network_offset(skb));
1331 seq = read_seqbegin(&neigh->ha_lock);
1332 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1333 neigh->ha, NULL, skb->len);
1334 } while (read_seqretry(&neigh->ha_lock, seq));
1335
1336 if (err >= 0)
1337 err = dev_queue_xmit(skb);
1338 else {
1339 err = -EINVAL;
1340 kfree_skb(skb);
1341 }
1342 return err;
1343}
1344EXPORT_SYMBOL(neigh_connected_output);
1345
1346int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1347{
1348 return dev_queue_xmit(skb);
1349}
1350EXPORT_SYMBOL(neigh_direct_output);
1351
1352static void neigh_proxy_process(unsigned long arg)
1353{
1354 struct neigh_table *tbl = (struct neigh_table *)arg;
1355 long sched_next = 0;
1356 unsigned long now = jiffies;
1357 struct sk_buff *skb, *n;
1358
1359 spin_lock(&tbl->proxy_queue.lock);
1360
1361 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1362 long tdif = NEIGH_CB(skb)->sched_next - now;
1363
1364 if (tdif <= 0) {
1365 struct net_device *dev = skb->dev;
1366
1367 __skb_unlink(skb, &tbl->proxy_queue);
1368 if (tbl->proxy_redo && netif_running(dev)) {
1369 rcu_read_lock();
1370 tbl->proxy_redo(skb);
1371 rcu_read_unlock();
1372 } else {
1373 kfree_skb(skb);
1374 }
1375
1376 dev_put(dev);
1377 } else if (!sched_next || tdif < sched_next)
1378 sched_next = tdif;
1379 }
1380 del_timer(&tbl->proxy_timer);
1381 if (sched_next)
1382 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1383 spin_unlock(&tbl->proxy_queue.lock);
1384}
1385
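/* Queue a request for delayed proxy processing: the skb is held for a
 * random delay of up to PROXY_DELAY jiffies, subject to the PROXY_QLEN
 * limit on the proxy queue length.
 */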
1386void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1387 struct sk_buff *skb)
1388{
1389 unsigned long now = jiffies;
1390
1391 unsigned long sched_next = now + (prandom_u32() %
1392 NEIGH_VAR(p, PROXY_DELAY));
1393
1394 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1395 kfree_skb(skb);
1396 return;
1397 }
1398
1399 NEIGH_CB(skb)->sched_next = sched_next;
1400 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1401
1402 spin_lock(&tbl->proxy_queue.lock);
1403 if (del_timer(&tbl->proxy_timer)) {
1404 if (time_before(tbl->proxy_timer.expires, sched_next))
1405 sched_next = tbl->proxy_timer.expires;
1406 }
1407 skb_dst_drop(skb);
1408 dev_hold(skb->dev);
1409 __skb_queue_tail(&tbl->proxy_queue, skb);
1410 mod_timer(&tbl->proxy_timer, sched_next);
1411 spin_unlock(&tbl->proxy_queue.lock);
1412}
1413EXPORT_SYMBOL(pneigh_enqueue);
1414
1415static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1416 struct net *net, int ifindex)
1417{
1418 struct neigh_parms *p;
1419
1420 list_for_each_entry(p, &tbl->parms_list, list) {
1421 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1422 (!p->dev && !ifindex && net_eq(net, &init_net)))
1423 return p;
1424 }
1425
1426 return NULL;
1427}
1428
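/* Allocate per-device neighbour parameters as a copy of the table
 * defaults, let the driver adjust them via ndo_neigh_setup(), and link
 * them into the table's parms list.
 */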
1429struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1430 struct neigh_table *tbl)
1431{
1432 struct neigh_parms *p;
1433 struct net *net = dev_net(dev);
1434 const struct net_device_ops *ops = dev->netdev_ops;
1435
1436 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1437 if (p) {
1438 p->tbl = tbl;
1439 atomic_set(&p->refcnt, 1);
1440 p->reachable_time =
1441 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1442 dev_hold(dev);
1443 p->dev = dev;
1444 write_pnet(&p->net, net);
1445 p->sysctl_table = NULL;
1446
1447 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1448 dev_put(dev);
1449 kfree(p);
1450 return NULL;
1451 }
1452
1453 write_lock_bh(&tbl->lock);
1454 list_add(&p->list, &tbl->parms.list);
1455 write_unlock_bh(&tbl->lock);
1456
1457 neigh_parms_data_state_cleanall(p);
1458 }
1459 return p;
1460}
1461EXPORT_SYMBOL(neigh_parms_alloc);
1462
1463static void neigh_rcu_free_parms(struct rcu_head *head)
1464{
1465 struct neigh_parms *parms =
1466 container_of(head, struct neigh_parms, rcu_head);
1467
1468 neigh_parms_put(parms);
1469}
1470
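/* Unlink per-device parameters from the table and mark them dead; the
 * final reference is dropped from an RCU callback so concurrent readers
 * can finish first.
 */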
1471void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1472{
1473 if (!parms || parms == &tbl->parms)
1474 return;
1475 write_lock_bh(&tbl->lock);
1476 list_del(&parms->list);
1477 parms->dead = 1;
1478 write_unlock_bh(&tbl->lock);
1479 if (parms->dev)
1480 dev_put(parms->dev);
1481 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1482}
1483EXPORT_SYMBOL(neigh_parms_release);
1484
1485static void neigh_parms_destroy(struct neigh_parms *parms)
1486{
1487 kfree(parms);
1488}
1489
1490static struct lock_class_key neigh_table_proxy_queue_class;
1491
1492static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1493
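/* Bring up a neighbour table: per-CPU statistics, the /proc/net/stat
 * entry, the main and proxy hash tables, the periodic GC work and the
 * proxy timer, then publish the table in neigh_tables[].
 */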
1494void neigh_table_init(int index, struct neigh_table *tbl)
1495{
1496 unsigned long now = jiffies;
1497 unsigned long phsize;
1498
1499 INIT_LIST_HEAD(&tbl->parms_list);
1500 list_add(&tbl->parms.list, &tbl->parms_list);
1501 write_pnet(&tbl->parms.net, &init_net);
1502 atomic_set(&tbl->parms.refcnt, 1);
1503 tbl->parms.reachable_time =
1504 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1505
1506 tbl->stats = alloc_percpu(struct neigh_statistics);
1507 if (!tbl->stats)
1508 panic("cannot create neighbour cache statistics");
1509
1510#ifdef CONFIG_PROC_FS
1511 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1512 &neigh_stat_seq_fops, tbl))
1513 panic("cannot create neighbour proc dir entry");
1514#endif
1515
1516 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1517
1518 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1519 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1520
1521 if (!tbl->nht || !tbl->phash_buckets)
1522 panic("cannot allocate neighbour cache hashes");
1523
1524 if (!tbl->entry_size)
1525 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1526 tbl->key_len, NEIGH_PRIV_ALIGN);
1527 else
1528 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1529
1530 rwlock_init(&tbl->lock);
1531 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1532 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1533 tbl->parms.reachable_time);
1534 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1535 skb_queue_head_init_class(&tbl->proxy_queue,
1536 &neigh_table_proxy_queue_class);
1537
1538 tbl->last_flush = now;
1539 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1540
1541 neigh_tables[index] = tbl;
1542}
1543EXPORT_SYMBOL(neigh_table_init);
1544
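/* Tear down a neighbour table: stop the GC work and proxy timer, flush
 * queued and cached entries, then free the hash tables, the proc entry
 * and the per-CPU statistics.
 */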
1545int neigh_table_clear(int index, struct neigh_table *tbl)
1546{
1547 neigh_tables[index] = NULL;
1548	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1549 cancel_delayed_work_sync(&tbl->gc_work);
1550 del_timer_sync(&tbl->proxy_timer);
1551 pneigh_queue_purge(&tbl->proxy_queue);
1552 neigh_ifdown(tbl, NULL);
1553 if (atomic_read(&tbl->entries))
1554 pr_crit("neighbour leakage\n");
1555
1556 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1557 neigh_hash_free_rcu);
1558 tbl->nht = NULL;
1559
1560 kfree(tbl->phash_buckets);
1561 tbl->phash_buckets = NULL;
1562
1563 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1564
1565 free_percpu(tbl->stats);
1566 tbl->stats = NULL;
1567
1568 return 0;
1569}
1570EXPORT_SYMBOL(neigh_table_clear);
1571
1572static struct neigh_table *neigh_find_table(int family)
1573{
1574 struct neigh_table *tbl = NULL;
1575
1576 switch (family) {
1577 case AF_INET:
1578 tbl = neigh_tables[NEIGH_ARP_TABLE];
1579 break;
1580 case AF_INET6:
1581 tbl = neigh_tables[NEIGH_ND_TABLE];
1582 break;
1583 case AF_DECnet:
1584 tbl = neigh_tables[NEIGH_DN_TABLE];
1585 break;
1586 }
1587
1588 return tbl;
1589}
1590
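/* RTM_DELNEIGH handler: delete a proxy entry directly, or push a regular
 * entry to NUD_FAILED via neigh_update() so it can be garbage collected.
 */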
1591static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1592{
1593 struct net *net = sock_net(skb->sk);
1594 struct ndmsg *ndm;
1595 struct nlattr *dst_attr;
1596 struct neigh_table *tbl;
1597 struct neighbour *neigh;
1598 struct net_device *dev = NULL;
1599 int err = -EINVAL;
1600
1601 ASSERT_RTNL();
1602 if (nlmsg_len(nlh) < sizeof(*ndm))
1603 goto out;
1604
1605 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1606 if (dst_attr == NULL)
1607 goto out;
1608
1609 ndm = nlmsg_data(nlh);
1610 if (ndm->ndm_ifindex) {
1611 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1612 if (dev == NULL) {
1613 err = -ENODEV;
1614 goto out;
1615 }
1616 }
1617
1618 tbl = neigh_find_table(ndm->ndm_family);
1619 if (tbl == NULL)
1620 return -EAFNOSUPPORT;
1621
1622 if (nla_len(dst_attr) < tbl->key_len)
1623 goto out;
1624
1625 if (ndm->ndm_flags & NTF_PROXY) {
1626 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1627 goto out;
1628 }
1629
1630 if (dev == NULL)
1631 goto out;
1632
1633 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1634 if (neigh == NULL) {
1635 err = -ENOENT;
1636 goto out;
1637 }
1638
1639 err = neigh_update(neigh, NULL, NUD_FAILED,
1640 NEIGH_UPDATE_F_OVERRIDE |
1641 NEIGH_UPDATE_F_ADMIN);
1642 neigh_release(neigh);
1643
1644out:
1645 return err;
1646}
1647
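/* RTM_NEWNEIGH handler: create or update a proxy or regular neighbour
 * entry, honouring the NLM_F_CREATE, NLM_F_EXCL and NLM_F_REPLACE
 * semantics of the request.
 */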
1648static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1649{
1650 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1651 struct net *net = sock_net(skb->sk);
1652 struct ndmsg *ndm;
1653 struct nlattr *tb[NDA_MAX+1];
1654 struct neigh_table *tbl;
1655 struct net_device *dev = NULL;
1656 struct neighbour *neigh;
1657 void *dst, *lladdr;
1658 int err;
1659
1660 ASSERT_RTNL();
1661 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1662 if (err < 0)
1663 goto out;
1664
1665 err = -EINVAL;
1666 if (tb[NDA_DST] == NULL)
1667 goto out;
1668
1669 ndm = nlmsg_data(nlh);
1670 if (ndm->ndm_ifindex) {
1671 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1672 if (dev == NULL) {
1673 err = -ENODEV;
1674 goto out;
1675 }
1676
1677 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1678 goto out;
1679 }
1680
1681 tbl = neigh_find_table(ndm->ndm_family);
1682 if (tbl == NULL)
1683 return -EAFNOSUPPORT;
1684
1685 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1686 goto out;
1687 dst = nla_data(tb[NDA_DST]);
1688 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1689
1690 if (ndm->ndm_flags & NTF_PROXY) {
1691 struct pneigh_entry *pn;
1692
1693 err = -ENOBUFS;
1694 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1695 if (pn) {
1696 pn->flags = ndm->ndm_flags;
1697 err = 0;
1698 }
1699 goto out;
1700 }
1701
1702 if (dev == NULL)
1703 goto out;
1704
1705 neigh = neigh_lookup(tbl, dst, dev);
1706 if (neigh == NULL) {
1707 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1708 err = -ENOENT;
1709 goto out;
1710 }
1711
1712 neigh = __neigh_lookup_errno(tbl, dst, dev);
1713 if (IS_ERR(neigh)) {
1714 err = PTR_ERR(neigh);
1715 goto out;
1716 }
1717 } else {
1718 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1719 err = -EEXIST;
1720 neigh_release(neigh);
1721 goto out;
1722 }
1723
1724 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1725 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1726 }
1727
1728 if (ndm->ndm_flags & NTF_USE) {
1729 neigh_event_send(neigh, NULL);
1730 err = 0;
1731 } else
1732 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1733 neigh_release(neigh);
1734
1735out:
1736 return err;
1737}
1738
1739static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1740{
1741 struct nlattr *nest;
1742
1743 nest = nla_nest_start(skb, NDTA_PARMS);
1744 if (nest == NULL)
1745 return -ENOBUFS;
1746
1747 if ((parms->dev &&
1748 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1749 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1750 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1751 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1752	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1753 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1754 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1755 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1756 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1757 nla_put_u32(skb, NDTPA_UCAST_PROBES,
1758 NEIGH_VAR(parms, UCAST_PROBES)) ||
1759 nla_put_u32(skb, NDTPA_MCAST_PROBES,
1760 NEIGH_VAR(parms, MCAST_PROBES)) ||
1761 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1762 NEIGH_VAR(parms, MCAST_REPROBES)) ||
1763 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1764 NDTPA_PAD) ||
1765 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1766 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1767 nla_put_msecs(skb, NDTPA_GC_STALETIME,
1768 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1769 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1770 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1771 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1772 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1773 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1774 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1775 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1776 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1777 nla_put_msecs(skb, NDTPA_LOCKTIME,
1778 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1779 goto nla_put_failure;
1780 return nla_nest_end(skb, nest);
1781
1782nla_put_failure:
1783 nla_nest_cancel(skb, nest);
1784 return -EMSGSIZE;
1785}
1786
1787static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1788 u32 pid, u32 seq, int type, int flags)
1789{
1790 struct nlmsghdr *nlh;
1791 struct ndtmsg *ndtmsg;
1792
1793 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1794 if (nlh == NULL)
1795 return -EMSGSIZE;
1796
1797 ndtmsg = nlmsg_data(nlh);
1798
1799 read_lock_bh(&tbl->lock);
1800 ndtmsg->ndtm_family = tbl->family;
1801 ndtmsg->ndtm_pad1 = 0;
1802 ndtmsg->ndtm_pad2 = 0;
1803
1804 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1805 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1806 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1807 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1808 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1809 goto nla_put_failure;
1810 {
1811 unsigned long now = jiffies;
1812 unsigned int flush_delta = now - tbl->last_flush;
1813 unsigned int rand_delta = now - tbl->last_rand;
1814 struct neigh_hash_table *nht;
1815 struct ndt_config ndc = {
1816 .ndtc_key_len = tbl->key_len,
1817 .ndtc_entry_size = tbl->entry_size,
1818 .ndtc_entries = atomic_read(&tbl->entries),
1819 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1820 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1821 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1822 };
1823
1824 rcu_read_lock_bh();
1825 nht = rcu_dereference_bh(tbl->nht);
1826 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1827 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1828 rcu_read_unlock_bh();
1829
1830 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1831 goto nla_put_failure;
1832 }
1833
1834 {
1835 int cpu;
1836 struct ndt_stats ndst;
1837
1838 memset(&ndst, 0, sizeof(ndst));
1839
1840 for_each_possible_cpu(cpu) {
1841 struct neigh_statistics *st;
1842
1843 st = per_cpu_ptr(tbl->stats, cpu);
1844 ndst.ndts_allocs += st->allocs;
1845 ndst.ndts_destroys += st->destroys;
1846 ndst.ndts_hash_grows += st->hash_grows;
1847 ndst.ndts_res_failed += st->res_failed;
1848 ndst.ndts_lookups += st->lookups;
1849 ndst.ndts_hits += st->hits;
1850 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1851 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1852 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1853 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1854 ndst.ndts_table_fulls += st->table_fulls;
1855 }
1856
1857 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1858 NDTA_PAD))
1859 goto nla_put_failure;
1860 }
1861
1862 BUG_ON(tbl->parms.dev);
1863 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1864 goto nla_put_failure;
1865
1866 read_unlock_bh(&tbl->lock);
1867 nlmsg_end(skb, nlh);
1868 return 0;
1869
1870nla_put_failure:
1871 read_unlock_bh(&tbl->lock);
1872 nlmsg_cancel(skb, nlh);
1873 return -EMSGSIZE;
1874}
1875
1876static int neightbl_fill_param_info(struct sk_buff *skb,
1877 struct neigh_table *tbl,
1878 struct neigh_parms *parms,
1879 u32 pid, u32 seq, int type,
1880 unsigned int flags)
1881{
1882 struct ndtmsg *ndtmsg;
1883 struct nlmsghdr *nlh;
1884
1885 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1886 if (nlh == NULL)
1887 return -EMSGSIZE;
1888
1889 ndtmsg = nlmsg_data(nlh);
1890
1891 read_lock_bh(&tbl->lock);
1892 ndtmsg->ndtm_family = tbl->family;
1893 ndtmsg->ndtm_pad1 = 0;
1894 ndtmsg->ndtm_pad2 = 0;
1895
1896 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1897 neightbl_fill_parms(skb, parms) < 0)
1898 goto errout;
1899
1900 read_unlock_bh(&tbl->lock);
1901 nlmsg_end(skb, nlh);
1902 return 0;
1903errout:
1904 read_unlock_bh(&tbl->lock);
1905 nlmsg_cancel(skb, nlh);
1906 return -EMSGSIZE;
1907}
1908
1909static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1910 [NDTA_NAME] = { .type = NLA_STRING },
1911 [NDTA_THRESH1] = { .type = NLA_U32 },
1912 [NDTA_THRESH2] = { .type = NLA_U32 },
1913 [NDTA_THRESH3] = { .type = NLA_U32 },
1914 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1915 [NDTA_PARMS] = { .type = NLA_NESTED },
1916};
1917
1918static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1919 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1920 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1921 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1922 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1923 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1924 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1925 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
1926 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1927 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1928 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1929 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1930 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1931 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1932 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1933};
1934
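/* RTM_SETNEIGHTBL handler: update the table-wide thresholds and GC
 * interval, and any per-device or default parameters nested under
 * NDTA_PARMS.
 */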
1935static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1936{
1937 struct net *net = sock_net(skb->sk);
1938 struct neigh_table *tbl;
1939 struct ndtmsg *ndtmsg;
1940 struct nlattr *tb[NDTA_MAX+1];
1941 bool found = false;
1942 int err, tidx;
1943
1944 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1945 nl_neightbl_policy);
1946 if (err < 0)
1947 goto errout;
1948
1949 if (tb[NDTA_NAME] == NULL) {
1950 err = -EINVAL;
1951 goto errout;
1952 }
1953
1954 ndtmsg = nlmsg_data(nlh);
1955
1956 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1957 tbl = neigh_tables[tidx];
1958 if (!tbl)
1959 continue;
1960 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1961 continue;
1962 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1963 found = true;
1964 break;
1965 }
1966 }
1967
1968 if (!found)
1969 return -ENOENT;
1970
1971 /*
1972 * We acquire tbl->lock to be nice to the periodic timers and
1973 * make sure they always see a consistent set of values.
1974 */
1975 write_lock_bh(&tbl->lock);
1976
1977 if (tb[NDTA_PARMS]) {
1978 struct nlattr *tbp[NDTPA_MAX+1];
1979 struct neigh_parms *p;
1980 int i, ifindex = 0;
1981
1982 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1983 nl_ntbl_parm_policy);
1984 if (err < 0)
1985 goto errout_tbl_lock;
1986
1987 if (tbp[NDTPA_IFINDEX])
1988 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1989
1990 p = lookup_neigh_parms(tbl, net, ifindex);
1991 if (p == NULL) {
1992 err = -ENOENT;
1993 goto errout_tbl_lock;
1994 }
1995
1996 for (i = 1; i <= NDTPA_MAX; i++) {
1997 if (tbp[i] == NULL)
1998 continue;
1999
2000 switch (i) {
2001 case NDTPA_QUEUE_LEN:
2002 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2003 nla_get_u32(tbp[i]) *
2004 SKB_TRUESIZE(ETH_FRAME_LEN));
2005 break;
2006 case NDTPA_QUEUE_LENBYTES:
2007 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2008 nla_get_u32(tbp[i]));
2009 break;
2010 case NDTPA_PROXY_QLEN:
2011 NEIGH_VAR_SET(p, PROXY_QLEN,
2012 nla_get_u32(tbp[i]));
2013 break;
2014 case NDTPA_APP_PROBES:
2015 NEIGH_VAR_SET(p, APP_PROBES,
2016 nla_get_u32(tbp[i]));
2017 break;
2018 case NDTPA_UCAST_PROBES:
2019 NEIGH_VAR_SET(p, UCAST_PROBES,
2020 nla_get_u32(tbp[i]));
2021 break;
2022 case NDTPA_MCAST_PROBES:
2023 NEIGH_VAR_SET(p, MCAST_PROBES,
2024 nla_get_u32(tbp[i]));
2025 break;
2026 case NDTPA_MCAST_REPROBES:
2027 NEIGH_VAR_SET(p, MCAST_REPROBES,
2028 nla_get_u32(tbp[i]));
2029 break;
2030 case NDTPA_BASE_REACHABLE_TIME:
2031 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2032 nla_get_msecs(tbp[i]));
2033				/* Update reachable_time as well; otherwise the change only
2034				 * takes effect the next time neigh_periodic_work decides
2035				 * to recompute it (which can be several minutes away).
2036				 */
2037 p->reachable_time =
2038 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2039 break;
2040 case NDTPA_GC_STALETIME:
2041 NEIGH_VAR_SET(p, GC_STALETIME,
2042 nla_get_msecs(tbp[i]));
2043 break;
2044 case NDTPA_DELAY_PROBE_TIME:
2045 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2046 nla_get_msecs(tbp[i]));
2047 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2048 break;
2049 case NDTPA_RETRANS_TIME:
2050 NEIGH_VAR_SET(p, RETRANS_TIME,
2051 nla_get_msecs(tbp[i]));
2052 break;
2053 case NDTPA_ANYCAST_DELAY:
2054 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2055 nla_get_msecs(tbp[i]));
2056 break;
2057 case NDTPA_PROXY_DELAY:
2058 NEIGH_VAR_SET(p, PROXY_DELAY,
2059 nla_get_msecs(tbp[i]));
2060 break;
2061 case NDTPA_LOCKTIME:
2062 NEIGH_VAR_SET(p, LOCKTIME,
2063 nla_get_msecs(tbp[i]));
2064 break;
2065 }
2066 }
2067 }
2068
2069 err = -ENOENT;
2070 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2071 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2072 !net_eq(net, &init_net))
2073 goto errout_tbl_lock;
2074
2075 if (tb[NDTA_THRESH1])
2076 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2077
2078 if (tb[NDTA_THRESH2])
2079 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2080
2081 if (tb[NDTA_THRESH3])
2082 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2083
2084 if (tb[NDTA_GC_INTERVAL])
2085 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2086
2087 err = 0;
2088
2089errout_tbl_lock:
2090 write_unlock_bh(&tbl->lock);
2091errout:
2092 return err;
2093}
2094
2095static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2096{
2097 struct net *net = sock_net(skb->sk);
2098 int family, tidx, nidx = 0;
2099 int tbl_skip = cb->args[0];
2100 int neigh_skip = cb->args[1];
2101 struct neigh_table *tbl;
2102
2103 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2104
2105 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2106 struct neigh_parms *p;
2107
2108 tbl = neigh_tables[tidx];
2109 if (!tbl)
2110 continue;
2111
2112 if (tidx < tbl_skip || (family && tbl->family != family))
2113 continue;
2114
2115 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2116 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2117 NLM_F_MULTI) < 0)
2118 break;
2119
2120 nidx = 0;
2121 p = list_next_entry(&tbl->parms, list);
2122 list_for_each_entry_from(p, &tbl->parms_list, list) {
2123 if (!net_eq(neigh_parms_net(p), net))
2124 continue;
2125
2126 if (nidx < neigh_skip)
2127 goto next;
2128
2129 if (neightbl_fill_param_info(skb, tbl, p,
2130 NETLINK_CB(cb->skb).portid,
2131 cb->nlh->nlmsg_seq,
2132 RTM_NEWNEIGHTBL,
2133 NLM_F_MULTI) < 0)
2134 goto out;
2135 next:
2136 nidx++;
2137 }
2138
2139 neigh_skip = 0;
2140 }
2141out:
2142 cb->args[0] = tidx;
2143 cb->args[1] = nidx;
2144
2145 return skb->len;
2146}
2147
2148static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2149 u32 pid, u32 seq, int type, unsigned int flags)
2150{
2151 unsigned long now = jiffies;
2152 struct nda_cacheinfo ci;
2153 struct nlmsghdr *nlh;
2154 struct ndmsg *ndm;
2155
2156 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2157 if (nlh == NULL)
2158 return -EMSGSIZE;
2159
2160 ndm = nlmsg_data(nlh);
2161 ndm->ndm_family = neigh->ops->family;
2162 ndm->ndm_pad1 = 0;
2163 ndm->ndm_pad2 = 0;
2164 ndm->ndm_flags = neigh->flags;
2165 ndm->ndm_type = neigh->type;
2166 ndm->ndm_ifindex = neigh->dev->ifindex;
2167
2168 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2169 goto nla_put_failure;
2170
2171 read_lock_bh(&neigh->lock);
2172 ndm->ndm_state = neigh->nud_state;
2173 if (neigh->nud_state & NUD_VALID) {
2174 char haddr[MAX_ADDR_LEN];
2175
2176 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2177 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2178 read_unlock_bh(&neigh->lock);
2179 goto nla_put_failure;
2180 }
2181 }
2182
2183 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2184 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2185 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2186 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2187 read_unlock_bh(&neigh->lock);
2188
2189 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2190 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2191 goto nla_put_failure;
2192
2193 nlmsg_end(skb, nlh);
2194 return 0;
2195
2196nla_put_failure:
2197 nlmsg_cancel(skb, nlh);
2198 return -EMSGSIZE;
2199}
2200
2201static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2202 u32 pid, u32 seq, int type, unsigned int flags,
2203 struct neigh_table *tbl)
2204{
2205 struct nlmsghdr *nlh;
2206 struct ndmsg *ndm;
2207
2208 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2209 if (nlh == NULL)
2210 return -EMSGSIZE;
2211
2212 ndm = nlmsg_data(nlh);
2213 ndm->ndm_family = tbl->family;
2214 ndm->ndm_pad1 = 0;
2215 ndm->ndm_pad2 = 0;
2216 ndm->ndm_flags = pn->flags | NTF_PROXY;
2217 ndm->ndm_type = RTN_UNICAST;
2218 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219 ndm->ndm_state = NUD_NONE;
2220
2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2222 goto nla_put_failure;
2223
2224 nlmsg_end(skb, nlh);
2225 return 0;
2226
2227nla_put_failure:
2228 nlmsg_cancel(skb, nlh);
2229 return -EMSGSIZE;
2230}
2231
2232static void neigh_update_notify(struct neighbour *neigh)
2233{
2234 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2235 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2236}
2237
2238static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2239{
2240 struct net_device *master;
2241
2242 if (!master_idx)
2243 return false;
2244
2245 master = netdev_master_upper_dev_get(dev);
2246 if (!master || master->ifindex != master_idx)
2247 return true;
2248
2249 return false;
2250}
2251
2252static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2253{
2254 if (filter_idx && dev->ifindex != filter_idx)
2255 return true;
2256
2257 return false;
2258}
2259
2260static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2261 struct netlink_callback *cb)
2262{
2263 struct net *net = sock_net(skb->sk);
2264 const struct nlmsghdr *nlh = cb->nlh;
2265 struct nlattr *tb[NDA_MAX + 1];
2266 struct neighbour *n;
2267 int rc, h, s_h = cb->args[1];
2268 int idx, s_idx = idx = cb->args[2];
2269 struct neigh_hash_table *nht;
2270 int filter_master_idx = 0, filter_idx = 0;
2271 unsigned int flags = NLM_F_MULTI;
2272 int err;
2273
2274 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2275 if (!err) {
2276 if (tb[NDA_IFINDEX])
2277 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2278
2279 if (tb[NDA_MASTER])
2280 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2281
2282 if (filter_idx || filter_master_idx)
2283 flags |= NLM_F_DUMP_FILTERED;
2284 }
2285
2286 rcu_read_lock_bh();
2287 nht = rcu_dereference_bh(tbl->nht);
2288
2289 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2290 if (h > s_h)
2291 s_idx = 0;
2292 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2293 n != NULL;
2294 n = rcu_dereference_bh(n->next)) {
2295 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2296 goto next;
2297 if (neigh_ifindex_filtered(n->dev, filter_idx) ||
2298 neigh_master_filtered(n->dev, filter_master_idx))
2299 goto next;
2300 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2301 cb->nlh->nlmsg_seq,
2302 RTM_NEWNEIGH,
2303 flags) < 0) {
2304 rc = -1;
2305 goto out;
2306 }
2307next:
2308 idx++;
2309 }
2310 }
2311 rc = skb->len;
2312out:
2313 rcu_read_unlock_bh();
2314 cb->args[1] = h;
2315 cb->args[2] = idx;
2316 return rc;
2317}
2318
2319static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2320 struct netlink_callback *cb)
2321{
2322 struct pneigh_entry *n;
2323 struct net *net = sock_net(skb->sk);
2324 int rc, h, s_h = cb->args[3];
2325 int idx, s_idx = idx = cb->args[4];
2326
2327 read_lock_bh(&tbl->lock);
2328
2329 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2330 if (h > s_h)
2331 s_idx = 0;
2332 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2333 if (idx < s_idx || pneigh_net(n) != net)
2334 goto next;
2335 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2336 cb->nlh->nlmsg_seq,
2337 RTM_NEWNEIGH,
2338 NLM_F_MULTI, tbl) < 0) {
2339 read_unlock_bh(&tbl->lock);
2340 rc = -1;
2341 goto out;
2342 }
2343 next:
2344 idx++;
2345 }
2346 }
2347
2348 read_unlock_bh(&tbl->lock);
2349 rc = skb->len;
2350out:
2351 cb->args[3] = h;
2352 cb->args[4] = idx;
2353 return rc;
2354
2355}
2356
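/* RTM_GETNEIGH dump handler: walk every table matching the requested
 * family and dump either the proxy entries (when NTF_PROXY is set in the
 * request) or the regular neighbour entries.
 */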
2357static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2358{
2359 struct neigh_table *tbl;
2360 int t, family, s_t;
2361 int proxy = 0;
2362 int err;
2363
2364 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2365
2366	/* Check whether a full ndmsg structure is present; the family
2367	 * member is at the same offset in both ndmsg and rtgenmsg.
2368	 */
2369 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2370 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2371 proxy = 1;
2372
2373 s_t = cb->args[0];
2374
2375 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2376 tbl = neigh_tables[t];
2377
2378 if (!tbl)
2379 continue;
2380 if (t < s_t || (family && tbl->family != family))
2381 continue;
2382 if (t > s_t)
2383 memset(&cb->args[1], 0, sizeof(cb->args) -
2384 sizeof(cb->args[0]));
2385 if (proxy)
2386 err = pneigh_dump_table(tbl, skb, cb);
2387 else
2388 err = neigh_dump_table(tbl, skb, cb);
2389 if (err < 0)
2390 break;
2391 }
2392
2393 cb->args[0] = t;
2394 return skb->len;
2395}
2396
2397void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2398{
2399 int chain;
2400 struct neigh_hash_table *nht;
2401
2402 rcu_read_lock_bh();
2403 nht = rcu_dereference_bh(tbl->nht);
2404
2405 read_lock(&tbl->lock); /* avoid resizes */
2406 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2407 struct neighbour *n;
2408
2409 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2410 n != NULL;
2411 n = rcu_dereference_bh(n->next))
2412 cb(n, cookie);
2413 }
2414 read_unlock(&tbl->lock);
2415 rcu_read_unlock_bh();
2416}
2417EXPORT_SYMBOL(neigh_for_each);
2418
2419/* The tbl->lock must be held as a writer and BH disabled. */
2420void __neigh_for_each_release(struct neigh_table *tbl,
2421 int (*cb)(struct neighbour *))
2422{
2423 int chain;
2424 struct neigh_hash_table *nht;
2425
2426 nht = rcu_dereference_protected(tbl->nht,
2427 lockdep_is_held(&tbl->lock));
2428 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2429 struct neighbour *n;
2430 struct neighbour __rcu **np;
2431
2432 np = &nht->hash_buckets[chain];
2433 while ((n = rcu_dereference_protected(*np,
2434 lockdep_is_held(&tbl->lock))) != NULL) {
2435 int release;
2436
2437 write_lock(&n->lock);
2438 release = cb(n);
2439 if (release) {
2440 rcu_assign_pointer(*np,
2441 rcu_dereference_protected(n->next,
2442 lockdep_is_held(&tbl->lock)));
2443 n->dead = 1;
2444 } else
2445 np = &n->next;
2446 write_unlock(&n->lock);
2447 if (release)
2448 neigh_cleanup_and_release(n);
2449 }
2450 }
2451}
2452EXPORT_SYMBOL(__neigh_for_each_release);
2453
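/* Transmit an skb through the neighbour table selected by @index:
 * look up or create the entry and use its output method, or, for
 * NEIGH_LINK_TABLE, build the hard header from @addr directly.
 */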
2454int neigh_xmit(int index, struct net_device *dev,
2455 const void *addr, struct sk_buff *skb)
2456{
2457 int err = -EAFNOSUPPORT;
2458 if (likely(index < NEIGH_NR_TABLES)) {
2459 struct neigh_table *tbl;
2460 struct neighbour *neigh;
2461
2462 tbl = neigh_tables[index];
2463 if (!tbl)
2464 goto out;
2465 rcu_read_lock_bh();
2466 neigh = __neigh_lookup_noref(tbl, addr, dev);
2467 if (!neigh)
2468 neigh = __neigh_create(tbl, addr, dev, false);
2469 err = PTR_ERR(neigh);
2470 if (IS_ERR(neigh)) {
2471 rcu_read_unlock_bh();
2472 goto out_kfree_skb;
2473 }
2474 err = neigh->output(neigh, skb);
2475 rcu_read_unlock_bh();
2476 }
2477 else if (index == NEIGH_LINK_TABLE) {
2478 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2479 addr, NULL, skb->len);
2480 if (err < 0)
2481 goto out_kfree_skb;
2482 err = dev_queue_xmit(skb);
2483 }
2484out:
2485 return err;
2486out_kfree_skb:
2487 kfree_skb(skb);
2488 goto out;
2489}
2490EXPORT_SYMBOL(neigh_xmit);
2491
2492#ifdef CONFIG_PROC_FS
2493
2494static struct neighbour *neigh_get_first(struct seq_file *seq)
2495{
2496 struct neigh_seq_state *state = seq->private;
2497 struct net *net = seq_file_net(seq);
2498 struct neigh_hash_table *nht = state->nht;
2499 struct neighbour *n = NULL;
2500 int bucket = state->bucket;
2501
2502 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2503 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2504 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2505
2506 while (n) {
2507 if (!net_eq(dev_net(n->dev), net))
2508 goto next;
2509 if (state->neigh_sub_iter) {
2510 loff_t fakep = 0;
2511 void *v;
2512
2513 v = state->neigh_sub_iter(state, n, &fakep);
2514 if (!v)
2515 goto next;
2516 }
2517 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2518 break;
2519 if (n->nud_state & ~NUD_NOARP)
2520 break;
2521next:
2522 n = rcu_dereference_bh(n->next);
2523 }
2524
2525 if (n)
2526 break;
2527 }
2528 state->bucket = bucket;
2529
2530 return n;
2531}
2532
2533static struct neighbour *neigh_get_next(struct seq_file *seq,
2534 struct neighbour *n,
2535 loff_t *pos)
2536{
2537 struct neigh_seq_state *state = seq->private;
2538 struct net *net = seq_file_net(seq);
2539 struct neigh_hash_table *nht = state->nht;
2540
2541 if (state->neigh_sub_iter) {
2542 void *v = state->neigh_sub_iter(state, n, pos);
2543 if (v)
2544 return n;
2545 }
2546 n = rcu_dereference_bh(n->next);
2547
2548 while (1) {
2549 while (n) {
2550 if (!net_eq(dev_net(n->dev), net))
2551 goto next;
2552 if (state->neigh_sub_iter) {
2553 void *v = state->neigh_sub_iter(state, n, pos);
2554 if (v)
2555 return n;
2556 goto next;
2557 }
2558 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2559 break;
2560
2561 if (n->nud_state & ~NUD_NOARP)
2562 break;
2563next:
2564 n = rcu_dereference_bh(n->next);
2565 }
2566
2567 if (n)
2568 break;
2569
2570 if (++state->bucket >= (1 << nht->hash_shift))
2571 break;
2572
2573 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2574 }
2575
2576 if (n && pos)
2577 --(*pos);
2578 return n;
2579}
2580
2581static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2582{
2583 struct neighbour *n = neigh_get_first(seq);
2584
2585 if (n) {
2586 --(*pos);
2587 while (*pos) {
2588 n = neigh_get_next(seq, n, pos);
2589 if (!n)
2590 break;
2591 }
2592 }
2593 return *pos ? NULL : n;
2594}
2595
2596static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2597{
2598 struct neigh_seq_state *state = seq->private;
2599 struct net *net = seq_file_net(seq);
2600 struct neigh_table *tbl = state->tbl;
2601 struct pneigh_entry *pn = NULL;
2602 int bucket = state->bucket;
2603
2604 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2605 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2606 pn = tbl->phash_buckets[bucket];
2607 while (pn && !net_eq(pneigh_net(pn), net))
2608 pn = pn->next;
2609 if (pn)
2610 break;
2611 }
2612 state->bucket = bucket;
2613
2614 return pn;
2615}
2616
2617static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2618 struct pneigh_entry *pn,
2619 loff_t *pos)
2620{
2621 struct neigh_seq_state *state = seq->private;
2622 struct net *net = seq_file_net(seq);
2623 struct neigh_table *tbl = state->tbl;
2624
2625 do {
2626 pn = pn->next;
2627 } while (pn && !net_eq(pneigh_net(pn), net));
2628
2629 while (!pn) {
2630 if (++state->bucket > PNEIGH_HASHMASK)
2631 break;
2632 pn = tbl->phash_buckets[state->bucket];
2633 while (pn && !net_eq(pneigh_net(pn), net))
2634 pn = pn->next;
2635 if (pn)
2636 break;
2637 }
2638
2639 if (pn && pos)
2640 --(*pos);
2641
2642 return pn;
2643}
2644
2645static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2646{
2647 struct pneigh_entry *pn = pneigh_get_first(seq);
2648
2649 if (pn) {
2650 --(*pos);
2651 while (*pos) {
2652 pn = pneigh_get_next(seq, pn, pos);
2653 if (!pn)
2654 break;
2655 }
2656 }
2657 return *pos ? NULL : pn;
2658}
2659
2660static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2661{
2662 struct neigh_seq_state *state = seq->private;
2663 void *rc;
2664 loff_t idxpos = *pos;
2665
2666 rc = neigh_get_idx(seq, &idxpos);
2667 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2668 rc = pneigh_get_idx(seq, &idxpos);
2669
2670 return rc;
2671}
2672
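/* seq_file iterator used by the protocol /proc interfaces: walks the
 * neighbour hash table under rcu_read_lock_bh() and then, unless
 * NEIGH_SEQ_NEIGH_ONLY is set, the proxy entries.
 */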
2673void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2674 __acquires(rcu_bh)
2675{
2676 struct neigh_seq_state *state = seq->private;
2677
2678 state->tbl = tbl;
2679 state->bucket = 0;
2680 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2681
2682 rcu_read_lock_bh();
2683 state->nht = rcu_dereference_bh(tbl->nht);
2684
2685 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2686}
2687EXPORT_SYMBOL(neigh_seq_start);
2688
2689void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2690{
2691 struct neigh_seq_state *state;
2692 void *rc;
2693
2694 if (v == SEQ_START_TOKEN) {
2695 rc = neigh_get_first(seq);
2696 goto out;
2697 }
2698
2699 state = seq->private;
2700 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2701 rc = neigh_get_next(seq, v, NULL);
2702 if (rc)
2703 goto out;
2704 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2705 rc = pneigh_get_first(seq);
2706 } else {
2707 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2708 rc = pneigh_get_next(seq, v, NULL);
2709 }
2710out:
2711 ++(*pos);
2712 return rc;
2713}
2714EXPORT_SYMBOL(neigh_seq_next);
2715
2716void neigh_seq_stop(struct seq_file *seq, void *v)
2717 __releases(rcu_bh)
2718{
2719 rcu_read_unlock_bh();
2720}
2721EXPORT_SYMBOL(neigh_seq_stop);
2722
2723/* statistics via seq_file */
2724
2725static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2726{
2727 struct neigh_table *tbl = seq->private;
2728 int cpu;
2729
2730 if (*pos == 0)
2731 return SEQ_START_TOKEN;
2732
2733 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2734 if (!cpu_possible(cpu))
2735 continue;
2736 *pos = cpu+1;
2737 return per_cpu_ptr(tbl->stats, cpu);
2738 }
2739 return NULL;
2740}
2741
2742static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2743{
2744 struct neigh_table *tbl = seq->private;
2745 int cpu;
2746
2747 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2748 if (!cpu_possible(cpu))
2749 continue;
2750 *pos = cpu+1;
2751 return per_cpu_ptr(tbl->stats, cpu);
2752 }
2753 return NULL;
2754}
2755
2756static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2757{
2758
2759}
2760
2761static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2762{
2763 struct neigh_table *tbl = seq->private;
2764 struct neigh_statistics *st = v;
2765
2766 if (v == SEQ_START_TOKEN) {
2767 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2768 return 0;
2769 }
2770
2771 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2772 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
2773 atomic_read(&tbl->entries),
2774
2775 st->allocs,
2776 st->destroys,
2777 st->hash_grows,
2778
2779 st->lookups,
2780 st->hits,
2781
2782 st->res_failed,
2783
2784 st->rcv_probes_mcast,
2785 st->rcv_probes_ucast,
2786
2787 st->periodic_gc_runs,
2788 st->forced_gc_runs,
2789 st->unres_discards,
2790 st->table_fulls
2791 );
2792
2793 return 0;
2794}
2795
2796static const struct seq_operations neigh_stat_seq_ops = {
2797 .start = neigh_stat_seq_start,
2798 .next = neigh_stat_seq_next,
2799 .stop = neigh_stat_seq_stop,
2800 .show = neigh_stat_seq_show,
2801};
2802
2803static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2804{
2805 int ret = seq_open(file, &neigh_stat_seq_ops);
2806
2807 if (!ret) {
2808 struct seq_file *sf = file->private_data;
2809 sf->private = PDE_DATA(inode);
2810 }
2811 return ret;
2812}
2813
2814static const struct file_operations neigh_stat_seq_fops = {
2815 .owner = THIS_MODULE,
2816 .open = neigh_stat_seq_open,
2817 .read = seq_read,
2818 .llseek = seq_lseek,
2819 .release = seq_release,
2820};
2821
2822#endif /* CONFIG_PROC_FS */
2823
2824static inline size_t neigh_nlmsg_size(void)
2825{
2826 return NLMSG_ALIGN(sizeof(struct ndmsg))
2827 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2828 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2829 + nla_total_size(sizeof(struct nda_cacheinfo))
2830 + nla_total_size(4); /* NDA_PROBES */
2831}
2832
2833static void __neigh_notify(struct neighbour *n, int type, int flags)
2834{
2835 struct net *net = dev_net(n->dev);
2836 struct sk_buff *skb;
2837 int err = -ENOBUFS;
2838
2839 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2840 if (skb == NULL)
2841 goto errout;
2842
2843 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2844 if (err < 0) {
2845 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2846 WARN_ON(err == -EMSGSIZE);
2847 kfree_skb(skb);
2848 goto errout;
2849 }
2850 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2851 return;
2852errout:
2853 if (err < 0)
2854 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2855}
2856
2857void neigh_app_ns(struct neighbour *n)
2858{
2859 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2860}
2861EXPORT_SYMBOL(neigh_app_ns);
2862
2863#ifdef CONFIG_SYSCTL
2864static int zero;
2865static int int_max = INT_MAX;
2866static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2867
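/* sysctl handler for the legacy unres_qlen knob: user space reads and
 * writes a packet count, converted to and from the byte-based
 * QUEUE_LEN_BYTES value at SKB_TRUESIZE(ETH_FRAME_LEN) per packet.
 */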
2868static int proc_unres_qlen(struct ctl_table *ctl, int write,
2869 void __user *buffer, size_t *lenp, loff_t *ppos)
2870{
2871 int size, ret;
2872 struct ctl_table tmp = *ctl;
2873
2874 tmp.extra1 = &zero;
2875 tmp.extra2 = &unres_qlen_max;
2876 tmp.data = &size;
2877
2878 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2879 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2880
2881 if (write && !ret)
2882 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2883 return ret;
2884}
2885
2886static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2887 int family)
2888{
2889 switch (family) {
2890 case AF_INET:
2891 return __in_dev_arp_parms_get_rcu(dev);
2892 case AF_INET6:
2893 return __in6_dev_nd_parms_get_rcu(dev);
2894 }
2895 return NULL;
2896}
2897
2898static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2899 int index)
2900{
2901 struct net_device *dev;
2902 int family = neigh_parms_family(p);
2903
2904 rcu_read_lock();
2905 for_each_netdev_rcu(net, dev) {
2906 struct neigh_parms *dst_p =
2907 neigh_get_dev_parms_rcu(dev, family);
2908
2909 if (dst_p && !test_bit(index, dst_p->data_state))
2910 dst_p->data[index] = p->data[index];
2911 }
2912 rcu_read_unlock();
2913}
2914
2915static void neigh_proc_update(struct ctl_table *ctl, int write)
2916{
2917 struct net_device *dev = ctl->extra1;
2918 struct neigh_parms *p = ctl->extra2;
2919 struct net *net = neigh_parms_net(p);
2920 int index = (int *) ctl->data - p->data;
2921
2922 if (!write)
2923 return;
2924
2925 set_bit(index, p->data_state);
2926 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2927 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2928	if (!dev) /* a NULL dev means these are the default parameters */
2929 neigh_copy_dflt_parms(net, p, index);
2930}
2931
2932static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2933 void __user *buffer,
2934 size_t *lenp, loff_t *ppos)
2935{
2936 struct ctl_table tmp = *ctl;
2937 int ret;
2938
2939 tmp.extra1 = &zero;
2940 tmp.extra2 = &int_max;
2941
2942 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2943 neigh_proc_update(ctl, write);
2944 return ret;
2945}
2946
2947int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2948 void __user *buffer, size_t *lenp, loff_t *ppos)
2949{
2950 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2951
2952 neigh_proc_update(ctl, write);
2953 return ret;
2954}
2955EXPORT_SYMBOL(neigh_proc_dointvec);
2956
2957int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2958 void __user *buffer,
2959 size_t *lenp, loff_t *ppos)
2960{
2961 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2962
2963 neigh_proc_update(ctl, write);
2964 return ret;
2965}
2966EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2967
2968static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2969 void __user *buffer,
2970 size_t *lenp, loff_t *ppos)
2971{
2972 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2973
2974 neigh_proc_update(ctl, write);
2975 return ret;
2976}
2977
2978int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2979 void __user *buffer,
2980 size_t *lenp, loff_t *ppos)
2981{
2982 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2983
2984 neigh_proc_update(ctl, write);
2985 return ret;
2986}
2987EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2988
2989static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2990 void __user *buffer,
2991 size_t *lenp, loff_t *ppos)
2992{
2993 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2994
2995 neigh_proc_update(ctl, write);
2996 return ret;
2997}
2998
2999static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3000 void __user *buffer,
3001 size_t *lenp, loff_t *ppos)
3002{
3003 struct neigh_parms *p = ctl->extra2;
3004 int ret;
3005
3006 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3007 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3008 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3009 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3010 else
3011 ret = -1;
3012
3013 if (write && ret == 0) {
3014		/* Update reachable_time as well; otherwise the change only
3015		 * takes effect the next time neigh_periodic_work decides to
3016		 * recompute it.
3017		 */
3018 p->reachable_time =
3019 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3020 }
3021 return ret;
3022}
3023
3024#define NEIGH_PARMS_DATA_OFFSET(index) \
3025 (&((struct neigh_parms *) 0)->data[index])
3026
3027#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3028 [NEIGH_VAR_ ## attr] = { \
3029 .procname = name, \
3030 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3031 .maxlen = sizeof(int), \
3032 .mode = mval, \
3033 .proc_handler = proc, \
3034 }
3035
3036#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3037 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3038
3039#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3040 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3041
3042#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3043 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3044
3045#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3046 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3047
3048#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3049 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3050
3051#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3052 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3053
3054static struct neigh_sysctl_table {
3055 struct ctl_table_header *sysctl_header;
3056 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3057} neigh_sysctl_template __read_mostly = {
3058 .neigh_vars = {
3059 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3060 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3061 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3062 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3063 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3064 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3065 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3066 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3067 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3068 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3069 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3070 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3071 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3072 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3073 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3074 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3075 [NEIGH_VAR_GC_INTERVAL] = {
3076 .procname = "gc_interval",
3077 .maxlen = sizeof(int),
3078 .mode = 0644,
3079 .proc_handler = proc_dointvec_jiffies,
3080 },
3081 [NEIGH_VAR_GC_THRESH1] = {
3082 .procname = "gc_thresh1",
3083 .maxlen = sizeof(int),
3084 .mode = 0644,
3085 .extra1 = &zero,
3086 .extra2 = &int_max,
3087 .proc_handler = proc_dointvec_minmax,
3088 },
3089 [NEIGH_VAR_GC_THRESH2] = {
3090 .procname = "gc_thresh2",
3091 .maxlen = sizeof(int),
3092 .mode = 0644,
3093 .extra1 = &zero,
3094 .extra2 = &int_max,
3095 .proc_handler = proc_dointvec_minmax,
3096 },
3097 [NEIGH_VAR_GC_THRESH3] = {
3098 .procname = "gc_thresh3",
3099 .maxlen = sizeof(int),
3100 .mode = 0644,
3101 .extra1 = &zero,
3102 .extra2 = &int_max,
3103 .proc_handler = proc_dointvec_minmax,
3104 },
3105 {},
3106 },
3107};
3108
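/* Register the per-device (or per-table default) neighbour sysctl
 * directory under net/<ipv4|ipv6>/neigh/, cloning neigh_sysctl_template
 * and pointing each entry at the corresponding parms field.
 */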
3109int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3110 proc_handler *handler)
3111{
3112 int i;
3113 struct neigh_sysctl_table *t;
3114 const char *dev_name_source;
3115 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3116 char *p_name;
3117
3118 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3119 if (!t)
3120 goto err;
3121
3122 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3123 t->neigh_vars[i].data += (long) p;
3124 t->neigh_vars[i].extra1 = dev;
3125 t->neigh_vars[i].extra2 = p;
3126 }
3127
3128 if (dev) {
3129 dev_name_source = dev->name;
3130 /* Terminate the table early */
3131 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3132 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3133 } else {
3134 struct neigh_table *tbl = p->tbl;
3135 dev_name_source = "default";
3136 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3137 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3138 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3139 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3140 }
3141
3142 if (handler) {
3143 /* RetransTime */
3144 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3145 /* ReachableTime */
3146 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3147		/* RetransTime (in milliseconds) */
3148 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3149 /* ReachableTime (in milliseconds) */
3150 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3151 } else {
3152		/* These handlers update p->reachable_time after
3153		 * base_reachable_time(_ms) is set, so the new value takes effect
3154		 * on the next neighbour update instead of waiting for
3155		 * neigh_periodic_work to recompute it (which can take several
3156		 * minutes). Any handler that replaces them should do the same.
3157		 */
3158 /* ReachableTime */
3159 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3160 neigh_proc_base_reachable_time;
3161 /* ReachableTime (in milliseconds) */
3162 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3163 neigh_proc_base_reachable_time;
3164 }
3165
3166 /* Don't export sysctls to unprivileged users */
3167 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3168 t->neigh_vars[0].procname = NULL;
3169
3170 switch (neigh_parms_family(p)) {
3171 case AF_INET:
3172 p_name = "ipv4";
3173 break;
3174 case AF_INET6:
3175 p_name = "ipv6";
3176 break;
3177 default:
3178 BUG();
3179 }
3180
3181 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3182 p_name, dev_name_source);
3183 t->sysctl_header =
3184 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3185 if (!t->sysctl_header)
3186 goto free;
3187
3188 p->sysctl_table = t;
3189 return 0;
3190
3191free:
3192 kfree(t);
3193err:
3194 return -ENOBUFS;
3195}
3196EXPORT_SYMBOL(neigh_sysctl_register);
3197
3198void neigh_sysctl_unregister(struct neigh_parms *p)
3199{
3200 if (p->sysctl_table) {
3201 struct neigh_sysctl_table *t = p->sysctl_table;
3202 p->sysctl_table = NULL;
3203 unregister_net_sysctl_table(t->sysctl_header);
3204 kfree(t);
3205 }
3206}
3207EXPORT_SYMBOL(neigh_sysctl_unregister);
3208
3209#endif /* CONFIG_SYSCTL */
3210
3211static int __init neigh_init(void)
3212{
3213 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3214 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3215 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3216
3217 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3218 NULL);
3219 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3220
3221 return 0;
3222}
3223
3224subsys_initcall(neigh_init);
3225