1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/socket.h>
23#include <linux/netdevice.h>
24#include <linux/proc_fs.h>
25#ifdef CONFIG_SYSCTL
26#include <linux/sysctl.h>
27#endif
28#include <linux/times.h>
29#include <net/net_namespace.h>
30#include <net/neighbour.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/netevent.h>
34#include <net/netlink.h>
35#include <linux/rtnetlink.h>
36#include <linux/random.h>
37#include <linux/string.h>
38#include <linux/log2.h>
39
40#define NEIGH_DEBUG 1
41
42#define NEIGH_PRINTK(x...) printk(x)
43#define NEIGH_NOPRINTK(x...) do { ; } while(0)
44#define NEIGH_PRINTK1 NEIGH_NOPRINTK
45#define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47#if NEIGH_DEBUG >= 1
48#undef NEIGH_PRINTK1
49#define NEIGH_PRINTK1 NEIGH_PRINTK
50#endif
51#if NEIGH_DEBUG >= 2
52#undef NEIGH_PRINTK2
53#define NEIGH_PRINTK2 NEIGH_PRINTK
54#endif
55
56#define PNEIGH_HASHMASK 0xF
57
58static void neigh_timer_handler(unsigned long arg);
59static void __neigh_notify(struct neighbour *n, int type, int flags);
60static void neigh_update_notify(struct neighbour *neigh);
61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62
63static struct neigh_table *neigh_tables;
64#ifdef CONFIG_PROC_FS
65static const struct file_operations neigh_stat_seq_fops;
66#endif
67
68/*
69 Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71 - All the scans/updates to hash buckets MUST be made under this lock.
 72 - NOTHING clever should be done under this lock: no callbacks
 73 to protocol backends, no attempts to send anything to the network.
 74 This will result in deadlocks if the backend/driver wants to use the
 75 neighbour cache.
76 - If the entry requires some non-trivial actions, increase
77 its reference count and release table lock.
78
79 Neighbour entries are protected:
80 - with reference count.
81 - with rwlock neigh->lock
82
83 Reference count prevents destruction.
84
85 neigh->lock mainly serializes ll address data and its validity state.
 86 However, the same lock is used to protect other entry fields:
87 - timer
88 - resolution queue
89
 90 Again, nothing clever shall be done under neigh->lock;
 91 the most complicated procedure we allow is dev->hard_header.
 92 dev->hard_header is assumed to be simple and to make no
 93 callbacks to neighbour tables.
94
 95 The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting the
 96 list of neighbour tables. This list is used only in process context.
97 */
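/*
 * A minimal sketch of that rule (illustrative, not part of the original
 * code): a scan that needs to do non-trivial work on an entry it found
 * takes a reference and drops tbl->lock before doing it:
 *
 *	write_lock_bh(&tbl->lock);
 *	...find the struct neighbour *n of interest...
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	...slow work: solicitation, notifications, driver callbacks...
 *	neigh_release(n);
 */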
98
99static DEFINE_RWLOCK(neigh_tbl_lock);
100
101static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
102{
103 kfree_skb(skb);
104 return -ENETDOWN;
105}
106
107static void neigh_cleanup_and_release(struct neighbour *neigh)
108{
109 if (neigh->parms->neigh_cleanup)
110 neigh->parms->neigh_cleanup(neigh);
111
112 __neigh_notify(neigh, RTM_DELNEIGH, 0);
113 neigh_release(neigh);
114}
115
116/*
 117 * The result is uniformly distributed in the interval (1/2)*base...(3/2)*base.
 118 * It corresponds to the default IPv6 settings and is not overridable,
 119 * because it is a really reasonable choice.
120 */
121
122unsigned long neigh_rand_reach_time(unsigned long base)
123{
124 return base ? (net_random() % base) + (base >> 1) : 0;
125}
126EXPORT_SYMBOL(neigh_rand_reach_time);
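/*
 * Worked example: for base = 30 * HZ, net_random() % base is uniform in
 * [0, 30*HZ) and adding base >> 1 shifts that to [15*HZ, 45*HZ), i.e. the
 * result is spread uniformly between base/2 and 3*base/2 as described in
 * the comment above.
 */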
127
128
129static int neigh_forced_gc(struct neigh_table *tbl)
130{
131 int shrunk = 0;
132 int i;
133 struct neigh_hash_table *nht;
134
135 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136
137 write_lock_bh(&tbl->lock);
138 nht = rcu_dereference_protected(tbl->nht,
139 lockdep_is_held(&tbl->lock));
140 for (i = 0; i < (1 << nht->hash_shift); i++) {
141 struct neighbour *n;
142 struct neighbour __rcu **np;
143
144 np = &nht->hash_buckets[i];
145 while ((n = rcu_dereference_protected(*np,
146 lockdep_is_held(&tbl->lock))) != NULL) {
147 /* Neighbour record may be discarded if:
148 * - nobody refers to it.
149 * - it is not permanent
150 */
151 write_lock(&n->lock);
152 if (atomic_read(&n->refcnt) == 1 &&
153 !(n->nud_state & NUD_PERMANENT)) {
154 rcu_assign_pointer(*np,
155 rcu_dereference_protected(n->next,
156 lockdep_is_held(&tbl->lock)));
157 n->dead = 1;
158 shrunk = 1;
159 write_unlock(&n->lock);
160 neigh_cleanup_and_release(n);
161 continue;
162 }
163 write_unlock(&n->lock);
164 np = &n->next;
165 }
166 }
167
168 tbl->last_flush = jiffies;
169
170 write_unlock_bh(&tbl->lock);
171
172 return shrunk;
173}
174
175static void neigh_add_timer(struct neighbour *n, unsigned long when)
176{
177 neigh_hold(n);
178 if (unlikely(mod_timer(&n->timer, when))) {
179 printk("NEIGH: BUG, double timer add, state is %x\n",
180 n->nud_state);
181 dump_stack();
182 }
183}
184
185static int neigh_del_timer(struct neighbour *n)
186{
187 if ((n->nud_state & NUD_IN_TIMER) &&
188 del_timer(&n->timer)) {
189 neigh_release(n);
190 return 1;
191 }
192 return 0;
193}
194
195static void pneigh_queue_purge(struct sk_buff_head *list)
196{
197 struct sk_buff *skb;
198
199 while ((skb = skb_dequeue(list)) != NULL) {
200 dev_put(skb->dev);
201 kfree_skb(skb);
202 }
203}
204
205static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
206{
207 int i;
208 struct neigh_hash_table *nht;
209
210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock));
212
213 for (i = 0; i < (1 << nht->hash_shift); i++) {
214 struct neighbour *n;
215 struct neighbour __rcu **np = &nht->hash_buckets[i];
216
217 while ((n = rcu_dereference_protected(*np,
218 lockdep_is_held(&tbl->lock))) != NULL) {
219 if (dev && n->dev != dev) {
220 np = &n->next;
221 continue;
222 }
223 rcu_assign_pointer(*np,
224 rcu_dereference_protected(n->next,
225 lockdep_is_held(&tbl->lock)));
226 write_lock(&n->lock);
227 neigh_del_timer(n);
228 n->dead = 1;
229
230 if (atomic_read(&n->refcnt) != 1) {
 231 /* The most unpleasant situation:
 232 we must destroy the neighbour entry,
 233 but someone still uses it.
 234
 235 Destruction will be delayed until
 236 the last user releases us, but
 237 we must kill timers etc. and move
 238 it to a safe state.
 239 */
240 skb_queue_purge(&n->arp_queue);
241 n->output = neigh_blackhole;
242 if (n->nud_state & NUD_VALID)
243 n->nud_state = NUD_NOARP;
244 else
245 n->nud_state = NUD_NONE;
246 NEIGH_PRINTK2("neigh %p is stray.\n", n);
247 }
248 write_unlock(&n->lock);
249 neigh_cleanup_and_release(n);
250 }
251 }
252}
253
254void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
255{
256 write_lock_bh(&tbl->lock);
257 neigh_flush_dev(tbl, dev);
258 write_unlock_bh(&tbl->lock);
259}
260EXPORT_SYMBOL(neigh_changeaddr);
261
262int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
263{
264 write_lock_bh(&tbl->lock);
265 neigh_flush_dev(tbl, dev);
266 pneigh_ifdown(tbl, dev);
267 write_unlock_bh(&tbl->lock);
268
269 del_timer_sync(&tbl->proxy_timer);
270 pneigh_queue_purge(&tbl->proxy_queue);
271 return 0;
272}
273EXPORT_SYMBOL(neigh_ifdown);
274
275static struct neighbour *neigh_alloc(struct neigh_table *tbl)
276{
277 struct neighbour *n = NULL;
278 unsigned long now = jiffies;
279 int entries;
280
281 entries = atomic_inc_return(&tbl->entries) - 1;
282 if (entries >= tbl->gc_thresh3 ||
283 (entries >= tbl->gc_thresh2 &&
284 time_after(now, tbl->last_flush + 5 * HZ))) {
285 if (!neigh_forced_gc(tbl) &&
286 entries >= tbl->gc_thresh3)
287 goto out_entries;
288 }
289
290 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
291 if (!n)
292 goto out_entries;
293
294 skb_queue_head_init(&n->arp_queue);
295 rwlock_init(&n->lock);
296 seqlock_init(&n->ha_lock);
297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole;
300 seqlock_init(&n->hh.hh_lock);
301 n->parms = neigh_parms_clone(&tbl->parms);
302 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
303
304 NEIGH_CACHE_STAT_INC(tbl, allocs);
305 n->tbl = tbl;
306 atomic_set(&n->refcnt, 1);
307 n->dead = 1;
308out:
309 return n;
310
311out_entries:
312 atomic_dec(&tbl->entries);
313 goto out;
314}
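/*
 * Illustration of the GC policy above (gc_thresh values here are
 * hypothetical; the real ones come from the per-protocol neigh_table):
 * with gc_thresh2 = 512 and gc_thresh3 = 1024, an allocation triggers a
 * synchronous neigh_forced_gc() once the table holds 1024 entries, or
 * once it holds 512 entries and the last forced flush was more than five
 * seconds ago. The allocation fails (returns NULL) only if the forced GC
 * frees nothing and the table is still at gc_thresh3 or above.
 */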
315
316static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
317{
318 size_t size = (1 << shift) * sizeof(struct neighbour *);
319 struct neigh_hash_table *ret;
320 struct neighbour __rcu **buckets;
321
322 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
323 if (!ret)
324 return NULL;
325 if (size <= PAGE_SIZE)
326 buckets = kzalloc(size, GFP_ATOMIC);
327 else
328 buckets = (struct neighbour __rcu **)
329 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
330 get_order(size));
331 if (!buckets) {
332 kfree(ret);
333 return NULL;
334 }
335 ret->hash_buckets = buckets;
336 ret->hash_shift = shift;
337 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
338 ret->hash_rnd |= 1;
339 return ret;
340}
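/*
 * Sizing note (illustrative): with shift = 3 the table has 1 << 3 = 8
 * buckets, i.e. 8 * sizeof(struct neighbour *) bytes of pointers, so the
 * kzalloc() path is used; only when the bucket array outgrows PAGE_SIZE
 * does the allocation fall back to __get_free_pages(). hash_rnd is forced
 * odd (|= 1), so it is never zero.
 */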
341
342static void neigh_hash_free_rcu(struct rcu_head *head)
343{
344 struct neigh_hash_table *nht = container_of(head,
345 struct neigh_hash_table,
346 rcu);
347 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
348 struct neighbour __rcu **buckets = nht->hash_buckets;
349
350 if (size <= PAGE_SIZE)
351 kfree(buckets);
352 else
353 free_pages((unsigned long)buckets, get_order(size));
354 kfree(nht);
355}
356
357static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
358 unsigned long new_shift)
359{
360 unsigned int i, hash;
361 struct neigh_hash_table *new_nht, *old_nht;
362
363 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
364
365 old_nht = rcu_dereference_protected(tbl->nht,
366 lockdep_is_held(&tbl->lock));
367 new_nht = neigh_hash_alloc(new_shift);
368 if (!new_nht)
369 return old_nht;
370
371 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
372 struct neighbour *n, *next;
373
374 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
375 lockdep_is_held(&tbl->lock));
376 n != NULL;
377 n = next) {
378 hash = tbl->hash(n->primary_key, n->dev,
379 new_nht->hash_rnd);
380
381 hash >>= (32 - new_nht->hash_shift);
382 next = rcu_dereference_protected(n->next,
383 lockdep_is_held(&tbl->lock));
384
385 rcu_assign_pointer(n->next,
386 rcu_dereference_protected(
387 new_nht->hash_buckets[hash],
388 lockdep_is_held(&tbl->lock)));
389 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
390 }
391 }
392
393 rcu_assign_pointer(tbl->nht, new_nht);
394 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
395 return new_nht;
396}
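/*
 * Note on the bucket index computation above (also used by the lookup
 * functions below): tbl->hash() returns a 32-bit value and
 * "hash >> (32 - hash_shift)" keeps only its top hash_shift bits. For
 * example, with hash_shift = 3 a hash of 0xA0000000 maps to bucket
 * 0xA0000000 >> 29 = 5.
 */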
397
398struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
399 struct net_device *dev)
400{
401 struct neighbour *n;
402 int key_len = tbl->key_len;
403 u32 hash_val;
404 struct neigh_hash_table *nht;
405
406 NEIGH_CACHE_STAT_INC(tbl, lookups);
407
408 rcu_read_lock_bh();
409 nht = rcu_dereference_bh(tbl->nht);
410 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
411
412 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
413 n != NULL;
414 n = rcu_dereference_bh(n->next)) {
415 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
416 if (!atomic_inc_not_zero(&n->refcnt))
417 n = NULL;
418 NEIGH_CACHE_STAT_INC(tbl, hits);
419 break;
420 }
421 }
422
423 rcu_read_unlock_bh();
424 return n;
425}
426EXPORT_SYMBOL(neigh_lookup);
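/*
 * Typical caller pattern (a sketch, not taken from this file): the lookup
 * returns the entry with a reference already taken, so the caller must
 * release it when done. arp_tbl and ip stand in here for a protocol's
 * table and key.
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *
 *	if (n) {
 *		...read or update the entry...
 *		neigh_release(n);
 *	}
 */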
427
428struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
429 const void *pkey)
430{
431 struct neighbour *n;
432 int key_len = tbl->key_len;
433 u32 hash_val;
434 struct neigh_hash_table *nht;
435
436 NEIGH_CACHE_STAT_INC(tbl, lookups);
437
438 rcu_read_lock_bh();
439 nht = rcu_dereference_bh(tbl->nht);
440 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
441
442 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
443 n != NULL;
444 n = rcu_dereference_bh(n->next)) {
445 if (!memcmp(n->primary_key, pkey, key_len) &&
446 net_eq(dev_net(n->dev), net)) {
447 if (!atomic_inc_not_zero(&n->refcnt))
448 n = NULL;
449 NEIGH_CACHE_STAT_INC(tbl, hits);
450 break;
451 }
452 }
453
454 rcu_read_unlock_bh();
455 return n;
456}
457EXPORT_SYMBOL(neigh_lookup_nodev);
458
459struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
460 struct net_device *dev)
461{
462 u32 hash_val;
463 int key_len = tbl->key_len;
464 int error;
465 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
466 struct neigh_hash_table *nht;
467
468 if (!n) {
469 rc = ERR_PTR(-ENOBUFS);
470 goto out;
471 }
472
473 memcpy(n->primary_key, pkey, key_len);
474 n->dev = dev;
475 dev_hold(dev);
476
477 /* Protocol specific setup. */
478 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
479 rc = ERR_PTR(error);
480 goto out_neigh_release;
481 }
482
483 /* Device specific setup. */
484 if (n->parms->neigh_setup &&
485 (error = n->parms->neigh_setup(n)) < 0) {
486 rc = ERR_PTR(error);
487 goto out_neigh_release;
488 }
489
490 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
491
492 write_lock_bh(&tbl->lock);
493 nht = rcu_dereference_protected(tbl->nht,
494 lockdep_is_held(&tbl->lock));
495
496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498
499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500
501 if (n->parms->dead) {
502 rc = ERR_PTR(-EINVAL);
503 goto out_tbl_unlock;
504 }
505
506 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 lockdep_is_held(&tbl->lock));
508 n1 != NULL;
509 n1 = rcu_dereference_protected(n1->next,
510 lockdep_is_held(&tbl->lock))) {
511 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
512 neigh_hold(n1);
513 rc = n1;
514 goto out_tbl_unlock;
515 }
516 }
517
518 n->dead = 0;
519 neigh_hold(n);
520 rcu_assign_pointer(n->next,
521 rcu_dereference_protected(nht->hash_buckets[hash_val],
522 lockdep_is_held(&tbl->lock)));
523 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
524 write_unlock_bh(&tbl->lock);
525 NEIGH_PRINTK2("neigh %p is created.\n", n);
526 rc = n;
527out:
528 return rc;
529out_tbl_unlock:
530 write_unlock_bh(&tbl->lock);
531out_neigh_release:
532 neigh_release(n);
533 goto out;
534}
535EXPORT_SYMBOL(neigh_create);
536
537static u32 pneigh_hash(const void *pkey, int key_len)
538{
539 u32 hash_val = *(u32 *)(pkey + key_len - 4);
540 hash_val ^= (hash_val >> 16);
541 hash_val ^= hash_val >> 8;
542 hash_val ^= hash_val >> 4;
543 hash_val &= PNEIGH_HASHMASK;
544 return hash_val;
545}
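/*
 * Worked example for the fold above: the hash uses only the last 4 bytes
 * of the key (for a 4-byte key such as an IPv4 address that is the whole
 * key), read as a host-order u32, and folds it down to one of
 * PNEIGH_HASHMASK + 1 = 16 buckets. If those bytes read as 0xc0a80001,
 * the successive xor-folds give 0xc0a8c0a9, 0xc0686869 and 0xcc6eeeef,
 * so the entry lands in bucket 0xf.
 */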
546
547static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
548 struct net *net,
549 const void *pkey,
550 int key_len,
551 struct net_device *dev)
552{
553 while (n) {
554 if (!memcmp(n->key, pkey, key_len) &&
555 net_eq(pneigh_net(n), net) &&
556 (n->dev == dev || !n->dev))
557 return n;
558 n = n->next;
559 }
560 return NULL;
561}
562
563struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
564 struct net *net, const void *pkey, struct net_device *dev)
565{
566 int key_len = tbl->key_len;
567 u32 hash_val = pneigh_hash(pkey, key_len);
568
569 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
570 net, pkey, key_len, dev);
571}
572EXPORT_SYMBOL_GPL(__pneigh_lookup);
573
574struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
575 struct net *net, const void *pkey,
576 struct net_device *dev, int creat)
577{
578 struct pneigh_entry *n;
579 int key_len = tbl->key_len;
580 u32 hash_val = pneigh_hash(pkey, key_len);
581
582 read_lock_bh(&tbl->lock);
583 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
584 net, pkey, key_len, dev);
585 read_unlock_bh(&tbl->lock);
586
587 if (n || !creat)
588 goto out;
589
590 ASSERT_RTNL();
591
592 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
593 if (!n)
594 goto out;
595
596 write_pnet(&n->net, hold_net(net));
597 memcpy(n->key, pkey, key_len);
598 n->dev = dev;
599 if (dev)
600 dev_hold(dev);
601
602 if (tbl->pconstructor && tbl->pconstructor(n)) {
603 if (dev)
604 dev_put(dev);
605 release_net(net);
606 kfree(n);
607 n = NULL;
608 goto out;
609 }
610
611 write_lock_bh(&tbl->lock);
612 n->next = tbl->phash_buckets[hash_val];
613 tbl->phash_buckets[hash_val] = n;
614 write_unlock_bh(&tbl->lock);
615out:
616 return n;
617}
618EXPORT_SYMBOL(pneigh_lookup);
619
620
621int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
622 struct net_device *dev)
623{
624 struct pneigh_entry *n, **np;
625 int key_len = tbl->key_len;
626 u32 hash_val = pneigh_hash(pkey, key_len);
627
628 write_lock_bh(&tbl->lock);
629 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
630 np = &n->next) {
631 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
632 net_eq(pneigh_net(n), net)) {
633 *np = n->next;
634 write_unlock_bh(&tbl->lock);
635 if (tbl->pdestructor)
636 tbl->pdestructor(n);
637 if (n->dev)
638 dev_put(n->dev);
639 release_net(pneigh_net(n));
640 kfree(n);
641 return 0;
642 }
643 }
644 write_unlock_bh(&tbl->lock);
645 return -ENOENT;
646}
647
648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
649{
650 struct pneigh_entry *n, **np;
651 u32 h;
652
653 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
654 np = &tbl->phash_buckets[h];
655 while ((n = *np) != NULL) {
656 if (!dev || n->dev == dev) {
657 *np = n->next;
658 if (tbl->pdestructor)
659 tbl->pdestructor(n);
660 if (n->dev)
661 dev_put(n->dev);
662 release_net(pneigh_net(n));
663 kfree(n);
664 continue;
665 }
666 np = &n->next;
667 }
668 }
669 return -ENOENT;
670}
671
672static void neigh_parms_destroy(struct neigh_parms *parms);
673
674static inline void neigh_parms_put(struct neigh_parms *parms)
675{
676 if (atomic_dec_and_test(&parms->refcnt))
677 neigh_parms_destroy(parms);
678}
679
680static void neigh_destroy_rcu(struct rcu_head *head)
681{
682 struct neighbour *neigh = container_of(head, struct neighbour, rcu);
683
684 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
685}
686/*
 687 * The neighbour must already have been removed from the table.
688 *
689 */
690void neigh_destroy(struct neighbour *neigh)
691{
692 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
693
694 if (!neigh->dead) {
695 printk(KERN_WARNING
696 "Destroying alive neighbour %p\n", neigh);
697 dump_stack();
698 return;
699 }
700
701 if (neigh_del_timer(neigh))
702 printk(KERN_WARNING "Impossible event.\n");
703
704 skb_queue_purge(&neigh->arp_queue);
705
706 dev_put(neigh->dev);
707 neigh_parms_put(neigh->parms);
708
709 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
710
711 atomic_dec(&neigh->tbl->entries);
712 call_rcu(&neigh->rcu, neigh_destroy_rcu);
713}
714EXPORT_SYMBOL(neigh_destroy);
715
716/* Neighbour state is suspicious;
717 disable fast path.
718
719 Called with write_locked neigh.
720 */
721static void neigh_suspect(struct neighbour *neigh)
722{
723 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
724
725 neigh->output = neigh->ops->output;
726}
727
728/* Neighbour state is OK;
729 enable fast path.
730
731 Called with write_locked neigh.
732 */
733static void neigh_connect(struct neighbour *neigh)
734{
735 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
736
737 neigh->output = neigh->ops->connected_output;
738}
739
740static void neigh_periodic_work(struct work_struct *work)
741{
742 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
743 struct neighbour *n;
744 struct neighbour __rcu **np;
745 unsigned int i;
746 struct neigh_hash_table *nht;
747
748 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
749
750 write_lock_bh(&tbl->lock);
751 nht = rcu_dereference_protected(tbl->nht,
752 lockdep_is_held(&tbl->lock));
753
754 /*
755 * periodically recompute ReachableTime from random function
756 */
757
758 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
759 struct neigh_parms *p;
760 tbl->last_rand = jiffies;
761 for (p = &tbl->parms; p; p = p->next)
762 p->reachable_time =
763 neigh_rand_reach_time(p->base_reachable_time);
764 }
765
766 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
767 np = &nht->hash_buckets[i];
768
769 while ((n = rcu_dereference_protected(*np,
770 lockdep_is_held(&tbl->lock))) != NULL) {
771 unsigned int state;
772
773 write_lock(&n->lock);
774
775 state = n->nud_state;
776 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
777 write_unlock(&n->lock);
778 goto next_elt;
779 }
780
781 if (time_before(n->used, n->confirmed))
782 n->used = n->confirmed;
783
784 if (atomic_read(&n->refcnt) == 1 &&
785 (state == NUD_FAILED ||
786 time_after(jiffies, n->used + n->parms->gc_staletime))) {
787 *np = n->next;
788 n->dead = 1;
789 write_unlock(&n->lock);
790 neigh_cleanup_and_release(n);
791 continue;
792 }
793 write_unlock(&n->lock);
794
795next_elt:
796 np = &n->next;
797 }
798 /*
799 * It's fine to release lock here, even if hash table
800 * grows while we are preempted.
801 */
802 write_unlock_bh(&tbl->lock);
803 cond_resched();
804 write_lock_bh(&tbl->lock);
805 }
806 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
807 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
808 * base_reachable_time.
809 */
810 schedule_delayed_work(&tbl->gc_work,
811 tbl->parms.base_reachable_time >> 1);
812 write_unlock_bh(&tbl->lock);
813}
814
815static __inline__ int neigh_max_probes(struct neighbour *n)
816{
817 struct neigh_parms *p = n->parms;
818 return (n->nud_state & NUD_PROBE) ?
819 p->ucast_probes :
820 p->ucast_probes + p->app_probes + p->mcast_probes;
821}
822
823static void neigh_invalidate(struct neighbour *neigh)
824 __releases(neigh->lock)
825 __acquires(neigh->lock)
826{
827 struct sk_buff *skb;
828
829 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
830 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
831 neigh->updated = jiffies;
832
 833 /* This is a very delicate spot. report_unreachable is a very complicated
 834 routine. In particular, it can hit the same neighbour entry!
 835
 836 So we try to be careful and avoid a dead loop. --ANK
837 */
838 while (neigh->nud_state == NUD_FAILED &&
839 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
840 write_unlock(&neigh->lock);
841 neigh->ops->error_report(neigh, skb);
842 write_lock(&neigh->lock);
843 }
844 skb_queue_purge(&neigh->arp_queue);
845}
846
847/* Called when a timer expires for a neighbour entry. */
848
849static void neigh_timer_handler(unsigned long arg)
850{
851 unsigned long now, next;
852 struct neighbour *neigh = (struct neighbour *)arg;
853 unsigned state;
854 int notify = 0;
855
856 write_lock(&neigh->lock);
857
858 state = neigh->nud_state;
859 now = jiffies;
860 next = now + HZ;
861
862 if (!(state & NUD_IN_TIMER)) {
863#ifndef CONFIG_SMP
864 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
865#endif
866 goto out;
867 }
868
869 if (state & NUD_REACHABLE) {
870 if (time_before_eq(now,
871 neigh->confirmed + neigh->parms->reachable_time)) {
872 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
873 next = neigh->confirmed + neigh->parms->reachable_time;
874 } else if (time_before_eq(now,
875 neigh->used + neigh->parms->delay_probe_time)) {
876 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
877 neigh->nud_state = NUD_DELAY;
878 neigh->updated = jiffies;
879 neigh_suspect(neigh);
880 next = now + neigh->parms->delay_probe_time;
881 } else {
882 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
883 neigh->nud_state = NUD_STALE;
884 neigh->updated = jiffies;
885 neigh_suspect(neigh);
886 notify = 1;
887 }
888 } else if (state & NUD_DELAY) {
889 if (time_before_eq(now,
890 neigh->confirmed + neigh->parms->delay_probe_time)) {
891 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
892 neigh->nud_state = NUD_REACHABLE;
893 neigh->updated = jiffies;
894 neigh_connect(neigh);
895 notify = 1;
896 next = neigh->confirmed + neigh->parms->reachable_time;
897 } else {
898 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
899 neigh->nud_state = NUD_PROBE;
900 neigh->updated = jiffies;
901 atomic_set(&neigh->probes, 0);
902 next = now + neigh->parms->retrans_time;
903 }
904 } else {
905 /* NUD_PROBE|NUD_INCOMPLETE */
906 next = now + neigh->parms->retrans_time;
907 }
908
909 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
910 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
911 neigh->nud_state = NUD_FAILED;
912 notify = 1;
913 neigh_invalidate(neigh);
914 }
915
916 if (neigh->nud_state & NUD_IN_TIMER) {
917 if (time_before(next, jiffies + HZ/2))
918 next = jiffies + HZ/2;
919 if (!mod_timer(&neigh->timer, next))
920 neigh_hold(neigh);
921 }
922 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
923 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
924 /* keep skb alive even if arp_queue overflows */
925 if (skb)
926 skb = skb_copy(skb, GFP_ATOMIC);
927 write_unlock(&neigh->lock);
928 neigh->ops->solicit(neigh, skb);
929 atomic_inc(&neigh->probes);
930 kfree_skb(skb);
931 } else {
932out:
933 write_unlock(&neigh->lock);
934 }
935
936 if (notify)
937 neigh_update_notify(neigh);
938
939 neigh_release(neigh);
940}
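/*
 * Rough summary of the transitions driven by this handler (derived from
 * the code above; all times come from the per-interface neigh_parms):
 *
 *	REACHABLE --(unconfirmed > reachable_time, used recently)--> DELAY
 *	REACHABLE --(unconfirmed > reachable_time, idle)-----------> STALE
 *	DELAY     --(confirmed within delay_probe_time)------------> REACHABLE
 *	DELAY     --(otherwise)------------------------------------> PROBE
 *	PROBE/INCOMPLETE --(probes >= neigh_max_probes())----------> FAILED
 *
 * While in PROBE or INCOMPLETE a solicitation is (re)sent roughly every
 * retrans_time until the probe limit is exceeded.
 */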
941
942int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
943{
944 int rc;
945 unsigned long now;
946
947 write_lock_bh(&neigh->lock);
948
949 rc = 0;
950 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
951 goto out_unlock_bh;
952
953 now = jiffies;
954
955 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
956 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
957 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
958 neigh->nud_state = NUD_INCOMPLETE;
959 neigh->updated = jiffies;
960 neigh_add_timer(neigh, now + 1);
961 } else {
962 neigh->nud_state = NUD_FAILED;
963 neigh->updated = jiffies;
964 write_unlock_bh(&neigh->lock);
965
966 kfree_skb(skb);
967 return 1;
968 }
969 } else if (neigh->nud_state & NUD_STALE) {
970 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
971 neigh->nud_state = NUD_DELAY;
972 neigh->updated = jiffies;
973 neigh_add_timer(neigh,
974 jiffies + neigh->parms->delay_probe_time);
975 }
976
977 if (neigh->nud_state == NUD_INCOMPLETE) {
978 if (skb) {
979 if (skb_queue_len(&neigh->arp_queue) >=
980 neigh->parms->queue_len) {
981 struct sk_buff *buff;
982 buff = __skb_dequeue(&neigh->arp_queue);
983 kfree_skb(buff);
984 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
985 }
986 skb_dst_force(skb);
987 __skb_queue_tail(&neigh->arp_queue, skb);
988 }
989 rc = 1;
990 }
991out_unlock_bh:
992 write_unlock_bh(&neigh->lock);
993 return rc;
994}
995EXPORT_SYMBOL(__neigh_event_send);
996
997static void neigh_update_hhs(struct neighbour *neigh)
998{
999 struct hh_cache *hh;
1000 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1001 = NULL;
1002
1003 if (neigh->dev->header_ops)
1004 update = neigh->dev->header_ops->cache_update;
1005
1006 if (update) {
1007 hh = &neigh->hh;
1008 if (hh->hh_len) {
1009 write_seqlock_bh(&hh->hh_lock);
1010 update(hh, neigh->dev, neigh->ha);
1011 write_sequnlock_bh(&hh->hh_lock);
1012 }
1013 }
1014}
1015
1016
1017
1018/* Generic update routine.
 1019 -- lladdr is the new lladdr, or NULL if it is not supplied.
 1020 -- new is the new state.
 1021 -- flags
 1022 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
 1023 if it is different.
 1024 NEIGH_UPDATE_F_WEAK_OVERRIDE will mark the existing "connected"
 1025 lladdr as suspect instead of overriding it
 1026 if it is different.
 1027 It also allows retaining the current state
 1028 if lladdr is unchanged.
 1029 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
 1030
 1031 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
 1032 NTF_ROUTER flag.
 1033 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
 1034 a router.
 1035
 1036 The caller MUST hold a reference count on the entry.
1037 */
1038
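/*
 * Example flag combinations (a sketch of how the flags compose, not a list
 * of actual callers): an administrative change, e.g. "ip neigh replace",
 * would pass NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE so that even
 * NUD_PERMANENT/NUD_NOARP entries and differing lladdrs are updated, while
 * a confirmation learned passively from protocol traffic would typically
 * use NEIGH_UPDATE_F_WEAK_OVERRIDE, so that a different lladdr only
 * demotes a connected entry to NUD_STALE instead of replacing the address.
 */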
1039int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1040 u32 flags)
1041{
1042 u8 old;
1043 int err;
1044 int notify = 0;
1045 struct net_device *dev;
1046 int update_isrouter = 0;
1047
1048 write_lock_bh(&neigh->lock);
1049
1050 dev = neigh->dev;
1051 old = neigh->nud_state;
1052 err = -EPERM;
1053
1054 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1055 (old & (NUD_NOARP | NUD_PERMANENT)))
1056 goto out;
1057
1058 if (!(new & NUD_VALID)) {
1059 neigh_del_timer(neigh);
1060 if (old & NUD_CONNECTED)
1061 neigh_suspect(neigh);
1062 neigh->nud_state = new;
1063 err = 0;
1064 notify = old & NUD_VALID;
1065 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1066 (new & NUD_FAILED)) {
1067 neigh_invalidate(neigh);
1068 notify = 1;
1069 }
1070 goto out;
1071 }
1072
1073 /* Compare new lladdr with cached one */
1074 if (!dev->addr_len) {
1075 /* First case: device needs no address. */
1076 lladdr = neigh->ha;
1077 } else if (lladdr) {
1078 /* The second case: if something is already cached
1079 and a new address is proposed:
1080 - compare new & old
1081 - if they are different, check override flag
1082 */
1083 if ((old & NUD_VALID) &&
1084 !memcmp(lladdr, neigh->ha, dev->addr_len))
1085 lladdr = neigh->ha;
1086 } else {
1087 /* No address is supplied; if we know something,
1088 use it, otherwise discard the request.
1089 */
1090 err = -EINVAL;
1091 if (!(old & NUD_VALID))
1092 goto out;
1093 lladdr = neigh->ha;
1094 }
1095
1096 if (new & NUD_CONNECTED)
1097 neigh->confirmed = jiffies;
1098 neigh->updated = jiffies;
1099
 1100 /* If the entry was valid and the address has not changed,
 1101 do not change the entry state if the new one is STALE.
1102 */
1103 err = 0;
1104 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1105 if (old & NUD_VALID) {
1106 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1107 update_isrouter = 0;
1108 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1109 (old & NUD_CONNECTED)) {
1110 lladdr = neigh->ha;
1111 new = NUD_STALE;
1112 } else
1113 goto out;
1114 } else {
1115 if (lladdr == neigh->ha && new == NUD_STALE &&
1116 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1117 (old & NUD_CONNECTED))
1118 )
1119 new = old;
1120 }
1121 }
1122
1123 if (new != old) {
1124 neigh_del_timer(neigh);
1125 if (new & NUD_IN_TIMER)
1126 neigh_add_timer(neigh, (jiffies +
1127 ((new & NUD_REACHABLE) ?
1128 neigh->parms->reachable_time :
1129 0)));
1130 neigh->nud_state = new;
1131 }
1132
1133 if (lladdr != neigh->ha) {
1134 write_seqlock(&neigh->ha_lock);
1135 memcpy(&neigh->ha, lladdr, dev->addr_len);
1136 write_sequnlock(&neigh->ha_lock);
1137 neigh_update_hhs(neigh);
1138 if (!(new & NUD_CONNECTED))
1139 neigh->confirmed = jiffies -
1140 (neigh->parms->base_reachable_time << 1);
1141 notify = 1;
1142 }
1143 if (new == old)
1144 goto out;
1145 if (new & NUD_CONNECTED)
1146 neigh_connect(neigh);
1147 else
1148 neigh_suspect(neigh);
1149 if (!(old & NUD_VALID)) {
1150 struct sk_buff *skb;
1151
1152 /* Again: avoid dead loop if something went wrong */
1153
1154 while (neigh->nud_state & NUD_VALID &&
1155 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1156 struct dst_entry *dst = skb_dst(skb);
1157 struct neighbour *n2, *n1 = neigh;
1158 write_unlock_bh(&neigh->lock);
1159 /* On shaper/eql skb->dst->neighbour != neigh :( */
1160 if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1161 n1 = n2;
1162 n1->output(n1, skb);
1163 write_lock_bh(&neigh->lock);
1164 }
1165 skb_queue_purge(&neigh->arp_queue);
1166 }
1167out:
1168 if (update_isrouter) {
1169 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1170 (neigh->flags | NTF_ROUTER) :
1171 (neigh->flags & ~NTF_ROUTER);
1172 }
1173 write_unlock_bh(&neigh->lock);
1174
1175 if (notify)
1176 neigh_update_notify(neigh);
1177
1178 return err;
1179}
1180EXPORT_SYMBOL(neigh_update);
1181
1182struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1183 u8 *lladdr, void *saddr,
1184 struct net_device *dev)
1185{
1186 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1187 lladdr || !dev->addr_len);
1188 if (neigh)
1189 neigh_update(neigh, lladdr, NUD_STALE,
1190 NEIGH_UPDATE_F_OVERRIDE);
1191 return neigh;
1192}
1193EXPORT_SYMBOL(neigh_event_ns);
1194
1195/* called with read_lock_bh(&n->lock); */
1196static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1197{
1198 struct net_device *dev = dst->dev;
1199 __be16 prot = dst->ops->protocol;
1200 struct hh_cache *hh = &n->hh;
1201
1202 write_lock_bh(&n->lock);
1203
1204 /* Only one thread can come in here and initialize the
1205 * hh_cache entry.
1206 */
1207 if (!hh->hh_len)
1208 dev->header_ops->cache(n, hh, prot);
1209
1210 write_unlock_bh(&n->lock);
1211}
1212
1213/* This function can be used in contexts where only the old dev_queue_xmit
 1214 * worked, e.g. if you want to override the normal output path (eql, shaper),
 1215 * but resolution has not been made yet.
1216 */
1217
1218int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1219{
1220 struct net_device *dev = skb->dev;
1221
1222 __skb_pull(skb, skb_network_offset(skb));
1223
1224 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1225 skb->len) < 0 &&
1226 dev->header_ops->rebuild(skb))
1227 return 0;
1228
1229 return dev_queue_xmit(skb);
1230}
1231EXPORT_SYMBOL(neigh_compat_output);
1232
1233/* Slow and careful. */
1234
1235int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1236{
1237 struct dst_entry *dst = skb_dst(skb);
1238 int rc = 0;
1239
1240 if (!dst)
1241 goto discard;
1242
1243 __skb_pull(skb, skb_network_offset(skb));
1244
1245 if (!neigh_event_send(neigh, skb)) {
1246 int err;
1247 struct net_device *dev = neigh->dev;
1248 unsigned int seq;
1249
1250 if (dev->header_ops->cache && !neigh->hh.hh_len)
1251 neigh_hh_init(neigh, dst);
1252
1253 do {
1254 seq = read_seqbegin(&neigh->ha_lock);
1255 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1256 neigh->ha, NULL, skb->len);
1257 } while (read_seqretry(&neigh->ha_lock, seq));
1258
1259 if (err >= 0)
1260 rc = dev_queue_xmit(skb);
1261 else
1262 goto out_kfree_skb;
1263 }
1264out:
1265 return rc;
1266discard:
1267 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1268 dst, neigh);
1269out_kfree_skb:
1270 rc = -EINVAL;
1271 kfree_skb(skb);
1272 goto out;
1273}
1274EXPORT_SYMBOL(neigh_resolve_output);
1275
1276/* As fast as possible without hh cache */
1277
1278int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1279{
1280 struct net_device *dev = neigh->dev;
1281 unsigned int seq;
1282 int err;
1283
1284 __skb_pull(skb, skb_network_offset(skb));
1285
1286 do {
1287 seq = read_seqbegin(&neigh->ha_lock);
1288 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1289 neigh->ha, NULL, skb->len);
1290 } while (read_seqretry(&neigh->ha_lock, seq));
1291
1292 if (err >= 0)
1293 err = dev_queue_xmit(skb);
1294 else {
1295 err = -EINVAL;
1296 kfree_skb(skb);
1297 }
1298 return err;
1299}
1300EXPORT_SYMBOL(neigh_connected_output);
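/*
 * Both output paths above read neigh->ha with the same lock-free pattern:
 * retry on the ha_lock seqlock so a concurrent neigh_update() cannot leave
 * a torn link-layer address in the rebuilt header. A minimal sketch of the
 * reader side:
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&neigh->ha_lock);
 *		...copy or use neigh->ha...
 *	} while (read_seqretry(&neigh->ha_lock, seq));
 */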
1301
1302int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1303{
1304 return dev_queue_xmit(skb);
1305}
1306EXPORT_SYMBOL(neigh_direct_output);
1307
1308static void neigh_proxy_process(unsigned long arg)
1309{
1310 struct neigh_table *tbl = (struct neigh_table *)arg;
1311 long sched_next = 0;
1312 unsigned long now = jiffies;
1313 struct sk_buff *skb, *n;
1314
1315 spin_lock(&tbl->proxy_queue.lock);
1316
1317 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1318 long tdif = NEIGH_CB(skb)->sched_next - now;
1319
1320 if (tdif <= 0) {
1321 struct net_device *dev = skb->dev;
1322
1323 __skb_unlink(skb, &tbl->proxy_queue);
1324 if (tbl->proxy_redo && netif_running(dev)) {
1325 rcu_read_lock();
1326 tbl->proxy_redo(skb);
1327 rcu_read_unlock();
1328 } else {
1329 kfree_skb(skb);
1330 }
1331
1332 dev_put(dev);
1333 } else if (!sched_next || tdif < sched_next)
1334 sched_next = tdif;
1335 }
1336 del_timer(&tbl->proxy_timer);
1337 if (sched_next)
1338 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1339 spin_unlock(&tbl->proxy_queue.lock);
1340}
1341
1342void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1343 struct sk_buff *skb)
1344{
1345 unsigned long now = jiffies;
1346 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1347
1348 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1349 kfree_skb(skb);
1350 return;
1351 }
1352
1353 NEIGH_CB(skb)->sched_next = sched_next;
1354 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1355
1356 spin_lock(&tbl->proxy_queue.lock);
1357 if (del_timer(&tbl->proxy_timer)) {
1358 if (time_before(tbl->proxy_timer.expires, sched_next))
1359 sched_next = tbl->proxy_timer.expires;
1360 }
1361 skb_dst_drop(skb);
1362 dev_hold(skb->dev);
1363 __skb_queue_tail(&tbl->proxy_queue, skb);
1364 mod_timer(&tbl->proxy_timer, sched_next);
1365 spin_unlock(&tbl->proxy_queue.lock);
1366}
1367EXPORT_SYMBOL(pneigh_enqueue);
1368
1369static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1370 struct net *net, int ifindex)
1371{
1372 struct neigh_parms *p;
1373
1374 for (p = &tbl->parms; p; p = p->next) {
1375 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1376 (!p->dev && !ifindex))
1377 return p;
1378 }
1379
1380 return NULL;
1381}
1382
1383struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1384 struct neigh_table *tbl)
1385{
1386 struct neigh_parms *p, *ref;
1387 struct net *net = dev_net(dev);
1388 const struct net_device_ops *ops = dev->netdev_ops;
1389
1390 ref = lookup_neigh_parms(tbl, net, 0);
1391 if (!ref)
1392 return NULL;
1393
1394 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1395 if (p) {
1396 p->tbl = tbl;
1397 atomic_set(&p->refcnt, 1);
1398 p->reachable_time =
1399 neigh_rand_reach_time(p->base_reachable_time);
1400
1401 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1402 kfree(p);
1403 return NULL;
1404 }
1405
1406 dev_hold(dev);
1407 p->dev = dev;
1408 write_pnet(&p->net, hold_net(net));
1409 p->sysctl_table = NULL;
1410 write_lock_bh(&tbl->lock);
1411 p->next = tbl->parms.next;
1412 tbl->parms.next = p;
1413 write_unlock_bh(&tbl->lock);
1414 }
1415 return p;
1416}
1417EXPORT_SYMBOL(neigh_parms_alloc);
1418
1419static void neigh_rcu_free_parms(struct rcu_head *head)
1420{
1421 struct neigh_parms *parms =
1422 container_of(head, struct neigh_parms, rcu_head);
1423
1424 neigh_parms_put(parms);
1425}
1426
1427void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1428{
1429 struct neigh_parms **p;
1430
1431 if (!parms || parms == &tbl->parms)
1432 return;
1433 write_lock_bh(&tbl->lock);
1434 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1435 if (*p == parms) {
1436 *p = parms->next;
1437 parms->dead = 1;
1438 write_unlock_bh(&tbl->lock);
1439 if (parms->dev)
1440 dev_put(parms->dev);
1441 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1442 return;
1443 }
1444 }
1445 write_unlock_bh(&tbl->lock);
1446 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1447}
1448EXPORT_SYMBOL(neigh_parms_release);
1449
1450static void neigh_parms_destroy(struct neigh_parms *parms)
1451{
1452 release_net(neigh_parms_net(parms));
1453 kfree(parms);
1454}
1455
1456static struct lock_class_key neigh_table_proxy_queue_class;
1457
1458void neigh_table_init_no_netlink(struct neigh_table *tbl)
1459{
1460 unsigned long now = jiffies;
1461 unsigned long phsize;
1462
1463 write_pnet(&tbl->parms.net, &init_net);
1464 atomic_set(&tbl->parms.refcnt, 1);
1465 tbl->parms.reachable_time =
1466 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1467
1468 if (!tbl->kmem_cachep)
1469 tbl->kmem_cachep =
1470 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1471 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1472 NULL);
1473 tbl->stats = alloc_percpu(struct neigh_statistics);
1474 if (!tbl->stats)
1475 panic("cannot create neighbour cache statistics");
1476
1477#ifdef CONFIG_PROC_FS
1478 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1479 &neigh_stat_seq_fops, tbl))
1480 panic("cannot create neighbour proc dir entry");
1481#endif
1482
1483 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1484
1485 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1486 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1487
1488 if (!tbl->nht || !tbl->phash_buckets)
1489 panic("cannot allocate neighbour cache hashes");
1490
1491 rwlock_init(&tbl->lock);
1492 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1493 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1494 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1495 skb_queue_head_init_class(&tbl->proxy_queue,
1496 &neigh_table_proxy_queue_class);
1497
1498 tbl->last_flush = now;
1499 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1500}
1501EXPORT_SYMBOL(neigh_table_init_no_netlink);
1502
1503void neigh_table_init(struct neigh_table *tbl)
1504{
1505 struct neigh_table *tmp;
1506
1507 neigh_table_init_no_netlink(tbl);
1508 write_lock(&neigh_tbl_lock);
1509 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1510 if (tmp->family == tbl->family)
1511 break;
1512 }
1513 tbl->next = neigh_tables;
1514 neigh_tables = tbl;
1515 write_unlock(&neigh_tbl_lock);
1516
1517 if (unlikely(tmp)) {
1518 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1519 "family %d\n", tbl->family);
1520 dump_stack();
1521 }
1522}
1523EXPORT_SYMBOL(neigh_table_init);
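/*
 * A protocol registers its cache by filling in a struct neigh_table and
 * calling neigh_table_init() once at init time. A minimal sketch (field
 * names as used elsewhere in this file; a real table such as arp_tbl sets
 * many more fields, including the parms defaults):
 *
 *	static struct neigh_table my_tbl = {
 *		.family		= AF_INET,
 *		.key_len	= 4,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.id		= "my_cache",
 *		.hash		= my_hash_fn,
 *		.constructor	= my_constructor,
 *	};
 *
 *	neigh_table_init(&my_tbl);
 */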
1524
1525int neigh_table_clear(struct neigh_table *tbl)
1526{
1527 struct neigh_table **tp;
1528
 1529 /* This is not clean... Fix it so that the IPv6 module can be unloaded safely */
1530 cancel_delayed_work_sync(&tbl->gc_work);
1531 del_timer_sync(&tbl->proxy_timer);
1532 pneigh_queue_purge(&tbl->proxy_queue);
1533 neigh_ifdown(tbl, NULL);
1534 if (atomic_read(&tbl->entries))
1535 printk(KERN_CRIT "neighbour leakage\n");
1536 write_lock(&neigh_tbl_lock);
1537 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1538 if (*tp == tbl) {
1539 *tp = tbl->next;
1540 break;
1541 }
1542 }
1543 write_unlock(&neigh_tbl_lock);
1544
1545 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1546 neigh_hash_free_rcu);
1547 tbl->nht = NULL;
1548
1549 kfree(tbl->phash_buckets);
1550 tbl->phash_buckets = NULL;
1551
1552 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1553
1554 free_percpu(tbl->stats);
1555 tbl->stats = NULL;
1556
1557 kmem_cache_destroy(tbl->kmem_cachep);
1558 tbl->kmem_cachep = NULL;
1559
1560 return 0;
1561}
1562EXPORT_SYMBOL(neigh_table_clear);
1563
1564static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1565{
1566 struct net *net = sock_net(skb->sk);
1567 struct ndmsg *ndm;
1568 struct nlattr *dst_attr;
1569 struct neigh_table *tbl;
1570 struct net_device *dev = NULL;
1571 int err = -EINVAL;
1572
1573 ASSERT_RTNL();
1574 if (nlmsg_len(nlh) < sizeof(*ndm))
1575 goto out;
1576
1577 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1578 if (dst_attr == NULL)
1579 goto out;
1580
1581 ndm = nlmsg_data(nlh);
1582 if (ndm->ndm_ifindex) {
1583 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1584 if (dev == NULL) {
1585 err = -ENODEV;
1586 goto out;
1587 }
1588 }
1589
1590 read_lock(&neigh_tbl_lock);
1591 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1592 struct neighbour *neigh;
1593
1594 if (tbl->family != ndm->ndm_family)
1595 continue;
1596 read_unlock(&neigh_tbl_lock);
1597
1598 if (nla_len(dst_attr) < tbl->key_len)
1599 goto out;
1600
1601 if (ndm->ndm_flags & NTF_PROXY) {
1602 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1603 goto out;
1604 }
1605
1606 if (dev == NULL)
1607 goto out;
1608
1609 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1610 if (neigh == NULL) {
1611 err = -ENOENT;
1612 goto out;
1613 }
1614
1615 err = neigh_update(neigh, NULL, NUD_FAILED,
1616 NEIGH_UPDATE_F_OVERRIDE |
1617 NEIGH_UPDATE_F_ADMIN);
1618 neigh_release(neigh);
1619 goto out;
1620 }
1621 read_unlock(&neigh_tbl_lock);
1622 err = -EAFNOSUPPORT;
1623
1624out:
1625 return err;
1626}
1627
1628static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1629{
1630 struct net *net = sock_net(skb->sk);
1631 struct ndmsg *ndm;
1632 struct nlattr *tb[NDA_MAX+1];
1633 struct neigh_table *tbl;
1634 struct net_device *dev = NULL;
1635 int err;
1636
1637 ASSERT_RTNL();
1638 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1639 if (err < 0)
1640 goto out;
1641
1642 err = -EINVAL;
1643 if (tb[NDA_DST] == NULL)
1644 goto out;
1645
1646 ndm = nlmsg_data(nlh);
1647 if (ndm->ndm_ifindex) {
1648 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1649 if (dev == NULL) {
1650 err = -ENODEV;
1651 goto out;
1652 }
1653
1654 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1655 goto out;
1656 }
1657
1658 read_lock(&neigh_tbl_lock);
1659 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1660 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1661 struct neighbour *neigh;
1662 void *dst, *lladdr;
1663
1664 if (tbl->family != ndm->ndm_family)
1665 continue;
1666 read_unlock(&neigh_tbl_lock);
1667
1668 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1669 goto out;
1670 dst = nla_data(tb[NDA_DST]);
1671 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1672
1673 if (ndm->ndm_flags & NTF_PROXY) {
1674 struct pneigh_entry *pn;
1675
1676 err = -ENOBUFS;
1677 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1678 if (pn) {
1679 pn->flags = ndm->ndm_flags;
1680 err = 0;
1681 }
1682 goto out;
1683 }
1684
1685 if (dev == NULL)
1686 goto out;
1687
1688 neigh = neigh_lookup(tbl, dst, dev);
1689 if (neigh == NULL) {
1690 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1691 err = -ENOENT;
1692 goto out;
1693 }
1694
1695 neigh = __neigh_lookup_errno(tbl, dst, dev);
1696 if (IS_ERR(neigh)) {
1697 err = PTR_ERR(neigh);
1698 goto out;
1699 }
1700 } else {
1701 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1702 err = -EEXIST;
1703 neigh_release(neigh);
1704 goto out;
1705 }
1706
1707 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1708 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1709 }
1710
1711 if (ndm->ndm_flags & NTF_USE) {
1712 neigh_event_send(neigh, NULL);
1713 err = 0;
1714 } else
1715 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1716 neigh_release(neigh);
1717 goto out;
1718 }
1719
1720 read_unlock(&neigh_tbl_lock);
1721 err = -EAFNOSUPPORT;
1722out:
1723 return err;
1724}
1725
1726static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1727{
1728 struct nlattr *nest;
1729
1730 nest = nla_nest_start(skb, NDTA_PARMS);
1731 if (nest == NULL)
1732 return -ENOBUFS;
1733
1734 if (parms->dev)
1735 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1736
1737 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1738 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1739 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1740 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1741 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1742 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1743 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1744 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1745 parms->base_reachable_time);
1746 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1747 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1748 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1749 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1750 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1751 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1752
1753 return nla_nest_end(skb, nest);
1754
1755nla_put_failure:
1756 nla_nest_cancel(skb, nest);
1757 return -EMSGSIZE;
1758}
1759
1760static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1761 u32 pid, u32 seq, int type, int flags)
1762{
1763 struct nlmsghdr *nlh;
1764 struct ndtmsg *ndtmsg;
1765
1766 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1767 if (nlh == NULL)
1768 return -EMSGSIZE;
1769
1770 ndtmsg = nlmsg_data(nlh);
1771
1772 read_lock_bh(&tbl->lock);
1773 ndtmsg->ndtm_family = tbl->family;
1774 ndtmsg->ndtm_pad1 = 0;
1775 ndtmsg->ndtm_pad2 = 0;
1776
1777 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1778 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1779 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1780 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1781 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1782
1783 {
1784 unsigned long now = jiffies;
1785 unsigned int flush_delta = now - tbl->last_flush;
1786 unsigned int rand_delta = now - tbl->last_rand;
1787 struct neigh_hash_table *nht;
1788 struct ndt_config ndc = {
1789 .ndtc_key_len = tbl->key_len,
1790 .ndtc_entry_size = tbl->entry_size,
1791 .ndtc_entries = atomic_read(&tbl->entries),
1792 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1793 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1794 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1795 };
1796
1797 rcu_read_lock_bh();
1798 nht = rcu_dereference_bh(tbl->nht);
1799 ndc.ndtc_hash_rnd = nht->hash_rnd;
1800 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1801 rcu_read_unlock_bh();
1802
1803 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1804 }
1805
1806 {
1807 int cpu;
1808 struct ndt_stats ndst;
1809
1810 memset(&ndst, 0, sizeof(ndst));
1811
1812 for_each_possible_cpu(cpu) {
1813 struct neigh_statistics *st;
1814
1815 st = per_cpu_ptr(tbl->stats, cpu);
1816 ndst.ndts_allocs += st->allocs;
1817 ndst.ndts_destroys += st->destroys;
1818 ndst.ndts_hash_grows += st->hash_grows;
1819 ndst.ndts_res_failed += st->res_failed;
1820 ndst.ndts_lookups += st->lookups;
1821 ndst.ndts_hits += st->hits;
1822 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1823 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1824 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1825 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1826 }
1827
1828 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1829 }
1830
1831 BUG_ON(tbl->parms.dev);
1832 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1833 goto nla_put_failure;
1834
1835 read_unlock_bh(&tbl->lock);
1836 return nlmsg_end(skb, nlh);
1837
1838nla_put_failure:
1839 read_unlock_bh(&tbl->lock);
1840 nlmsg_cancel(skb, nlh);
1841 return -EMSGSIZE;
1842}
1843
1844static int neightbl_fill_param_info(struct sk_buff *skb,
1845 struct neigh_table *tbl,
1846 struct neigh_parms *parms,
1847 u32 pid, u32 seq, int type,
1848 unsigned int flags)
1849{
1850 struct ndtmsg *ndtmsg;
1851 struct nlmsghdr *nlh;
1852
1853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1854 if (nlh == NULL)
1855 return -EMSGSIZE;
1856
1857 ndtmsg = nlmsg_data(nlh);
1858
1859 read_lock_bh(&tbl->lock);
1860 ndtmsg->ndtm_family = tbl->family;
1861 ndtmsg->ndtm_pad1 = 0;
1862 ndtmsg->ndtm_pad2 = 0;
1863
1864 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1865 neightbl_fill_parms(skb, parms) < 0)
1866 goto errout;
1867
1868 read_unlock_bh(&tbl->lock);
1869 return nlmsg_end(skb, nlh);
1870errout:
1871 read_unlock_bh(&tbl->lock);
1872 nlmsg_cancel(skb, nlh);
1873 return -EMSGSIZE;
1874}
1875
1876static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1877 [NDTA_NAME] = { .type = NLA_STRING },
1878 [NDTA_THRESH1] = { .type = NLA_U32 },
1879 [NDTA_THRESH2] = { .type = NLA_U32 },
1880 [NDTA_THRESH3] = { .type = NLA_U32 },
1881 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1882 [NDTA_PARMS] = { .type = NLA_NESTED },
1883};
1884
1885static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1886 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1887 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1888 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1889 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1890 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1891 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1892 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1893 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1894 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1895 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1896 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1897 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1898 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1899};
1900
1901static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1902{
1903 struct net *net = sock_net(skb->sk);
1904 struct neigh_table *tbl;
1905 struct ndtmsg *ndtmsg;
1906 struct nlattr *tb[NDTA_MAX+1];
1907 int err;
1908
1909 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1910 nl_neightbl_policy);
1911 if (err < 0)
1912 goto errout;
1913
1914 if (tb[NDTA_NAME] == NULL) {
1915 err = -EINVAL;
1916 goto errout;
1917 }
1918
1919 ndtmsg = nlmsg_data(nlh);
1920 read_lock(&neigh_tbl_lock);
1921 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1922 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1923 continue;
1924
1925 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1926 break;
1927 }
1928
1929 if (tbl == NULL) {
1930 err = -ENOENT;
1931 goto errout_locked;
1932 }
1933
1934 /*
1935 * We acquire tbl->lock to be nice to the periodic timers and
1936 * make sure they always see a consistent set of values.
1937 */
1938 write_lock_bh(&tbl->lock);
1939
1940 if (tb[NDTA_PARMS]) {
1941 struct nlattr *tbp[NDTPA_MAX+1];
1942 struct neigh_parms *p;
1943 int i, ifindex = 0;
1944
1945 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1946 nl_ntbl_parm_policy);
1947 if (err < 0)
1948 goto errout_tbl_lock;
1949
1950 if (tbp[NDTPA_IFINDEX])
1951 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1952
1953 p = lookup_neigh_parms(tbl, net, ifindex);
1954 if (p == NULL) {
1955 err = -ENOENT;
1956 goto errout_tbl_lock;
1957 }
1958
1959 for (i = 1; i <= NDTPA_MAX; i++) {
1960 if (tbp[i] == NULL)
1961 continue;
1962
1963 switch (i) {
1964 case NDTPA_QUEUE_LEN:
1965 p->queue_len = nla_get_u32(tbp[i]);
1966 break;
1967 case NDTPA_PROXY_QLEN:
1968 p->proxy_qlen = nla_get_u32(tbp[i]);
1969 break;
1970 case NDTPA_APP_PROBES:
1971 p->app_probes = nla_get_u32(tbp[i]);
1972 break;
1973 case NDTPA_UCAST_PROBES:
1974 p->ucast_probes = nla_get_u32(tbp[i]);
1975 break;
1976 case NDTPA_MCAST_PROBES:
1977 p->mcast_probes = nla_get_u32(tbp[i]);
1978 break;
1979 case NDTPA_BASE_REACHABLE_TIME:
1980 p->base_reachable_time = nla_get_msecs(tbp[i]);
1981 break;
1982 case NDTPA_GC_STALETIME:
1983 p->gc_staletime = nla_get_msecs(tbp[i]);
1984 break;
1985 case NDTPA_DELAY_PROBE_TIME:
1986 p->delay_probe_time = nla_get_msecs(tbp[i]);
1987 break;
1988 case NDTPA_RETRANS_TIME:
1989 p->retrans_time = nla_get_msecs(tbp[i]);
1990 break;
1991 case NDTPA_ANYCAST_DELAY:
1992 p->anycast_delay = nla_get_msecs(tbp[i]);
1993 break;
1994 case NDTPA_PROXY_DELAY:
1995 p->proxy_delay = nla_get_msecs(tbp[i]);
1996 break;
1997 case NDTPA_LOCKTIME:
1998 p->locktime = nla_get_msecs(tbp[i]);
1999 break;
2000 }
2001 }
2002 }
2003
2004 if (tb[NDTA_THRESH1])
2005 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2006
2007 if (tb[NDTA_THRESH2])
2008 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2009
2010 if (tb[NDTA_THRESH3])
2011 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2012
2013 if (tb[NDTA_GC_INTERVAL])
2014 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2015
2016 err = 0;
2017
2018errout_tbl_lock:
2019 write_unlock_bh(&tbl->lock);
2020errout_locked:
2021 read_unlock(&neigh_tbl_lock);
2022errout:
2023 return err;
2024}
2025
2026static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2027{
2028 struct net *net = sock_net(skb->sk);
2029 int family, tidx, nidx = 0;
2030 int tbl_skip = cb->args[0];
2031 int neigh_skip = cb->args[1];
2032 struct neigh_table *tbl;
2033
2034 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2035
2036 read_lock(&neigh_tbl_lock);
2037 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2038 struct neigh_parms *p;
2039
2040 if (tidx < tbl_skip || (family && tbl->family != family))
2041 continue;
2042
2043 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2044 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2045 NLM_F_MULTI) <= 0)
2046 break;
2047
2048 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2049 if (!net_eq(neigh_parms_net(p), net))
2050 continue;
2051
2052 if (nidx < neigh_skip)
2053 goto next;
2054
2055 if (neightbl_fill_param_info(skb, tbl, p,
2056 NETLINK_CB(cb->skb).pid,
2057 cb->nlh->nlmsg_seq,
2058 RTM_NEWNEIGHTBL,
2059 NLM_F_MULTI) <= 0)
2060 goto out;
2061 next:
2062 nidx++;
2063 }
2064
2065 neigh_skip = 0;
2066 }
2067out:
2068 read_unlock(&neigh_tbl_lock);
2069 cb->args[0] = tidx;
2070 cb->args[1] = nidx;
2071
2072 return skb->len;
2073}
2074
2075static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2076 u32 pid, u32 seq, int type, unsigned int flags)
2077{
2078 unsigned long now = jiffies;
2079 struct nda_cacheinfo ci;
2080 struct nlmsghdr *nlh;
2081 struct ndmsg *ndm;
2082
2083 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2084 if (nlh == NULL)
2085 return -EMSGSIZE;
2086
2087 ndm = nlmsg_data(nlh);
2088 ndm->ndm_family = neigh->ops->family;
2089 ndm->ndm_pad1 = 0;
2090 ndm->ndm_pad2 = 0;
2091 ndm->ndm_flags = neigh->flags;
2092 ndm->ndm_type = neigh->type;
2093 ndm->ndm_ifindex = neigh->dev->ifindex;
2094
2095 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2096
2097 read_lock_bh(&neigh->lock);
2098 ndm->ndm_state = neigh->nud_state;
2099 if (neigh->nud_state & NUD_VALID) {
2100 char haddr[MAX_ADDR_LEN];
2101
2102 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2103 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2104 read_unlock_bh(&neigh->lock);
2105 goto nla_put_failure;
2106 }
2107 }
2108
2109 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2110 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2111 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2112 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2113 read_unlock_bh(&neigh->lock);
2114
2115 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2116 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2117
2118 return nlmsg_end(skb, nlh);
2119
2120nla_put_failure:
2121 nlmsg_cancel(skb, nlh);
2122 return -EMSGSIZE;
2123}
2124
2125static void neigh_update_notify(struct neighbour *neigh)
2126{
2127 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2128 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2129}
2130
2131static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2132 struct netlink_callback *cb)
2133{
2134 struct net *net = sock_net(skb->sk);
2135 struct neighbour *n;
2136 int rc, h, s_h = cb->args[1];
2137 int idx, s_idx = idx = cb->args[2];
2138 struct neigh_hash_table *nht;
2139
2140 rcu_read_lock_bh();
2141 nht = rcu_dereference_bh(tbl->nht);
2142
2143 for (h = 0; h < (1 << nht->hash_shift); h++) {
2144 if (h < s_h)
2145 continue;
2146 if (h > s_h)
2147 s_idx = 0;
2148 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2149 n != NULL;
2150 n = rcu_dereference_bh(n->next)) {
2151 if (!net_eq(dev_net(n->dev), net))
2152 continue;
2153 if (idx < s_idx)
2154 goto next;
2155 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2156 cb->nlh->nlmsg_seq,
2157 RTM_NEWNEIGH,
2158 NLM_F_MULTI) <= 0) {
2159 rc = -1;
2160 goto out;
2161 }
2162next:
2163 idx++;
2164 }
2165 }
2166 rc = skb->len;
2167out:
2168 rcu_read_unlock_bh();
2169 cb->args[1] = h;
2170 cb->args[2] = idx;
2171 return rc;
2172}
2173
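/* Dumps are resumable: cb->args[0] holds the table index, cb->args[1]
 * the hash bucket and cb->args[2] the entry index within that bucket,
 * so a dump that fills the skb can pick up where it stopped.
 */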
2174static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2175{
2176 struct neigh_table *tbl;
2177 int t, family, s_t;
2178
2179 read_lock(&neigh_tbl_lock);
2180 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2181 s_t = cb->args[0];
2182
2183 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2184 if (t < s_t || (family && tbl->family != family))
2185 continue;
2186 if (t > s_t)
2187 memset(&cb->args[1], 0, sizeof(cb->args) -
2188 sizeof(cb->args[0]));
2189 if (neigh_dump_table(tbl, skb, cb) < 0)
2190 break;
2191 }
2192 read_unlock(&neigh_tbl_lock);
2193
2194 cb->args[0] = t;
2195 return skb->len;
2196}
2197
2198void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2199{
2200 int chain;
2201 struct neigh_hash_table *nht;
2202
2203 rcu_read_lock_bh();
2204 nht = rcu_dereference_bh(tbl->nht);
2205
2206 read_lock(&tbl->lock); /* avoid resizes */
2207 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2208 struct neighbour *n;
2209
2210 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2211 n != NULL;
2212 n = rcu_dereference_bh(n->next))
2213 cb(n, cookie);
2214 }
2215 read_unlock(&tbl->lock);
2216 rcu_read_unlock_bh();
2217}
2218EXPORT_SYMBOL(neigh_for_each);
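/*
 * Usage sketch (illustrative, not part of this file): neigh_for_each()
 * walks every entry of a table with tbl->lock held for reading, so the
 * callback must not sleep or call back into the neighbour code.  The
 * helper below counts entries belonging to one device; "dev_count" and
 * "count_on_dev" are hypothetical names used only for this example.
 *
 *	struct dev_count {
 *		struct net_device *dev;
 *		int count;
 *	};
 *
 *	static void count_on_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct dev_count *dc = cookie;
 *
 *		if (n->dev == dc->dev)
 *			dc->count++;
 *	}
 *
 *	// caller:
 *	//	struct dev_count dc = { .dev = dev };
 *	//	neigh_for_each(&arp_tbl, count_on_dev, &dc);
 */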
2219
2220/* The tbl->lock must be held as a writer and BH disabled. */
2221void __neigh_for_each_release(struct neigh_table *tbl,
2222 int (*cb)(struct neighbour *))
2223{
2224 int chain;
2225 struct neigh_hash_table *nht;
2226
2227 nht = rcu_dereference_protected(tbl->nht,
2228 lockdep_is_held(&tbl->lock));
2229 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2230 struct neighbour *n;
2231 struct neighbour __rcu **np;
2232
2233 np = &nht->hash_buckets[chain];
2234 while ((n = rcu_dereference_protected(*np,
2235 lockdep_is_held(&tbl->lock))) != NULL) {
2236 int release;
2237
2238 write_lock(&n->lock);
2239 release = cb(n);
2240 if (release) {
2241 rcu_assign_pointer(*np,
2242 rcu_dereference_protected(n->next,
2243 lockdep_is_held(&tbl->lock)));
2244 n->dead = 1;
2245 } else
2246 np = &n->next;
2247 write_unlock(&n->lock);
2248 if (release)
2249 neigh_cleanup_and_release(n);
2250 }
2251 }
2252}
2253EXPORT_SYMBOL(__neigh_for_each_release);
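/*
 * Usage sketch (illustrative): __neigh_for_each_release() unlinks and
 * releases every entry for which the callback returns non-zero; the
 * caller must already hold tbl->lock for writing with BH disabled, as
 * the comment above requires.  "neigh_is_failed" is a hypothetical
 * predicate used only for this example.
 *
 *	static int neigh_is_failed(struct neighbour *n)
 *	{
 *		return n->nud_state == NUD_FAILED;
 *	}
 *
 *	// caller, between write_lock_bh(&tbl->lock) and
 *	// write_unlock_bh(&tbl->lock):
 *	//	__neigh_for_each_release(tbl, neigh_is_failed);
 */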
2254
2255#ifdef CONFIG_PROC_FS
2256
2257static struct neighbour *neigh_get_first(struct seq_file *seq)
2258{
2259 struct neigh_seq_state *state = seq->private;
2260 struct net *net = seq_file_net(seq);
2261 struct neigh_hash_table *nht = state->nht;
2262 struct neighbour *n = NULL;
2263 int bucket = state->bucket;
2264
2265 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2266 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2267 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2268
2269 while (n) {
2270 if (!net_eq(dev_net(n->dev), net))
2271 goto next;
2272 if (state->neigh_sub_iter) {
2273 loff_t fakep = 0;
2274 void *v;
2275
2276 v = state->neigh_sub_iter(state, n, &fakep);
2277 if (!v)
2278 goto next;
2279 }
2280 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2281 break;
2282 if (n->nud_state & ~NUD_NOARP)
2283 break;
2284next:
2285 n = rcu_dereference_bh(n->next);
2286 }
2287
2288 if (n)
2289 break;
2290 }
2291 state->bucket = bucket;
2292
2293 return n;
2294}
2295
2296static struct neighbour *neigh_get_next(struct seq_file *seq,
2297 struct neighbour *n,
2298 loff_t *pos)
2299{
2300 struct neigh_seq_state *state = seq->private;
2301 struct net *net = seq_file_net(seq);
2302 struct neigh_hash_table *nht = state->nht;
2303
2304 if (state->neigh_sub_iter) {
2305 void *v = state->neigh_sub_iter(state, n, pos);
2306 if (v)
2307 return n;
2308 }
2309 n = rcu_dereference_bh(n->next);
2310
2311 while (1) {
2312 while (n) {
2313 if (!net_eq(dev_net(n->dev), net))
2314 goto next;
2315 if (state->neigh_sub_iter) {
2316 void *v = state->neigh_sub_iter(state, n, pos);
2317 if (v)
2318 return n;
2319 goto next;
2320 }
2321 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2322 break;
2323
2324 if (n->nud_state & ~NUD_NOARP)
2325 break;
2326next:
2327 n = rcu_dereference_bh(n->next);
2328 }
2329
2330 if (n)
2331 break;
2332
2333 if (++state->bucket >= (1 << nht->hash_shift))
2334 break;
2335
2336 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2337 }
2338
2339 if (n && pos)
2340 --(*pos);
2341 return n;
2342}
2343
2344static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2345{
2346 struct neighbour *n = neigh_get_first(seq);
2347
2348 if (n) {
2349 --(*pos);
2350 while (*pos) {
2351 n = neigh_get_next(seq, n, pos);
2352 if (!n)
2353 break;
2354 }
2355 }
2356 return *pos ? NULL : n;
2357}
2358
2359static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2360{
2361 struct neigh_seq_state *state = seq->private;
2362 struct net *net = seq_file_net(seq);
2363 struct neigh_table *tbl = state->tbl;
2364 struct pneigh_entry *pn = NULL;
2365 int bucket = state->bucket;
2366
2367 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2368 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2369 pn = tbl->phash_buckets[bucket];
2370 while (pn && !net_eq(pneigh_net(pn), net))
2371 pn = pn->next;
2372 if (pn)
2373 break;
2374 }
2375 state->bucket = bucket;
2376
2377 return pn;
2378}
2379
2380static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2381 struct pneigh_entry *pn,
2382 loff_t *pos)
2383{
2384 struct neigh_seq_state *state = seq->private;
2385 struct net *net = seq_file_net(seq);
2386 struct neigh_table *tbl = state->tbl;
2387
2388 pn = pn->next;
2389 while (!pn) {
2390 if (++state->bucket > PNEIGH_HASHMASK)
2391 break;
2392 pn = tbl->phash_buckets[state->bucket];
2393 while (pn && !net_eq(pneigh_net(pn), net))
2394 pn = pn->next;
2395 if (pn)
2396 break;
2397 }
2398
2399 if (pn && pos)
2400 --(*pos);
2401
2402 return pn;
2403}
2404
2405static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2406{
2407 struct pneigh_entry *pn = pneigh_get_first(seq);
2408
2409 if (pn) {
2410 --(*pos);
2411 while (*pos) {
2412 pn = pneigh_get_next(seq, pn, pos);
2413 if (!pn)
2414 break;
2415 }
2416 }
2417 return *pos ? NULL : pn;
2418}
2419
2420static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2421{
2422 struct neigh_seq_state *state = seq->private;
2423 void *rc;
2424 loff_t idxpos = *pos;
2425
2426 rc = neigh_get_idx(seq, &idxpos);
2427 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2428 rc = pneigh_get_idx(seq, &idxpos);
2429
2430 return rc;
2431}
2432
2433void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2434 __acquires(rcu_bh)
2435{
2436 struct neigh_seq_state *state = seq->private;
2437
2438 state->tbl = tbl;
2439 state->bucket = 0;
2440 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2441
2442 rcu_read_lock_bh();
2443 state->nht = rcu_dereference_bh(tbl->nht);
2444
2445 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2446}
2447EXPORT_SYMBOL(neigh_seq_start);
2448
2449void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2450{
2451 struct neigh_seq_state *state;
2452 void *rc;
2453
2454 if (v == SEQ_START_TOKEN) {
2455 rc = neigh_get_first(seq);
2456 goto out;
2457 }
2458
2459 state = seq->private;
2460 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2461 rc = neigh_get_next(seq, v, NULL);
2462 if (rc)
2463 goto out;
2464 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2465 rc = pneigh_get_first(seq);
2466 } else {
2467 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2468 rc = pneigh_get_next(seq, v, NULL);
2469 }
2470out:
2471 ++(*pos);
2472 return rc;
2473}
2474EXPORT_SYMBOL(neigh_seq_next);
2475
2476void neigh_seq_stop(struct seq_file *seq, void *v)
2477 __releases(rcu_bh)
2478{
2479 rcu_read_unlock_bh();
2480}
2481EXPORT_SYMBOL(neigh_seq_stop);
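/*
 * Usage sketch (illustrative): a protocol exposes its cache through a
 * seq_file by plugging these helpers into its own seq_operations and
 * opening the file with a private area of
 * sizeof(struct neigh_seq_state), much as the ARP /proc code does.
 * "xx_seq_start" and "xx_tbl" are placeholder names.
 *
 *	static void *xx_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		// walk the main table only, skipping NUD_NOARP entries
 *		return neigh_seq_start(seq, pos, &xx_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	// .start = xx_seq_start, .next = neigh_seq_next,
 *	// .stop  = neigh_seq_stop, .show = <protocol-specific show>
 */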
2482
2483/* statistics via seq_file */
2484
2485static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2486{
2487 struct neigh_table *tbl = seq->private;
2488 int cpu;
2489
2490 if (*pos == 0)
2491 return SEQ_START_TOKEN;
2492
2493 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2494 if (!cpu_possible(cpu))
2495 continue;
2496 *pos = cpu+1;
2497 return per_cpu_ptr(tbl->stats, cpu);
2498 }
2499 return NULL;
2500}
2501
2502static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2503{
2504 struct neigh_table *tbl = seq->private;
2505 int cpu;
2506
2507 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2508 if (!cpu_possible(cpu))
2509 continue;
2510 *pos = cpu+1;
2511 return per_cpu_ptr(tbl->stats, cpu);
2512 }
2513 return NULL;
2514}
2515
2516static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2517{
2518
2519}
2520
2521static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2522{
2523 struct neigh_table *tbl = seq->private;
2524 struct neigh_statistics *st = v;
2525
2526 if (v == SEQ_START_TOKEN) {
2527 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2528 return 0;
2529 }
2530
2531 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2532 "%08lx %08lx %08lx %08lx %08lx\n",
2533 atomic_read(&tbl->entries),
2534
2535 st->allocs,
2536 st->destroys,
2537 st->hash_grows,
2538
2539 st->lookups,
2540 st->hits,
2541
2542 st->res_failed,
2543
2544 st->rcv_probes_mcast,
2545 st->rcv_probes_ucast,
2546
2547 st->periodic_gc_runs,
2548 st->forced_gc_runs,
2549 st->unres_discards
2550 );
2551
2552 return 0;
2553}
2554
2555static const struct seq_operations neigh_stat_seq_ops = {
2556 .start = neigh_stat_seq_start,
2557 .next = neigh_stat_seq_next,
2558 .stop = neigh_stat_seq_stop,
2559 .show = neigh_stat_seq_show,
2560};
2561
2562static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2563{
2564 int ret = seq_open(file, &neigh_stat_seq_ops);
2565
2566 if (!ret) {
2567 struct seq_file *sf = file->private_data;
2568 sf->private = PDE(inode)->data;
2569 }
2570 return ret;
2571}
2572
2573static const struct file_operations neigh_stat_seq_fops = {
2574 .owner = THIS_MODULE,
2575 .open = neigh_stat_seq_open,
2576 .read = seq_read,
2577 .llseek = seq_lseek,
2578 .release = seq_release,
2579};
2580
2581#endif /* CONFIG_PROC_FS */
2582
2583static inline size_t neigh_nlmsg_size(void)
2584{
2585 return NLMSG_ALIGN(sizeof(struct ndmsg))
2586 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2587 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2588 + nla_total_size(sizeof(struct nda_cacheinfo))
2589 + nla_total_size(4); /* NDA_PROBES */
2590}
2591
2592static void __neigh_notify(struct neighbour *n, int type, int flags)
2593{
2594 struct net *net = dev_net(n->dev);
2595 struct sk_buff *skb;
2596 int err = -ENOBUFS;
2597
2598 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2599 if (skb == NULL)
2600 goto errout;
2601
2602 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2603 if (err < 0) {
2604 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2605 WARN_ON(err == -EMSGSIZE);
2606 kfree_skb(skb);
2607 goto errout;
2608 }
2609 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2610 return;
2611errout:
2612 if (err < 0)
2613 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2614}
2615
2616#ifdef CONFIG_ARPD
2617void neigh_app_ns(struct neighbour *n)
2618{
2619 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2620}
2621EXPORT_SYMBOL(neigh_app_ns);
2622#endif /* CONFIG_ARPD */
2623
2624#ifdef CONFIG_SYSCTL
2625
2626#define NEIGH_VARS_MAX 19
2627
2628static struct neigh_sysctl_table {
2629 struct ctl_table_header *sysctl_header;
2630 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2631 char *dev_name;
2632} neigh_sysctl_template __read_mostly = {
2633 .neigh_vars = {
2634 {
2635 .procname = "mcast_solicit",
2636 .maxlen = sizeof(int),
2637 .mode = 0644,
2638 .proc_handler = proc_dointvec,
2639 },
2640 {
2641 .procname = "ucast_solicit",
2642 .maxlen = sizeof(int),
2643 .mode = 0644,
2644 .proc_handler = proc_dointvec,
2645 },
2646 {
2647 .procname = "app_solicit",
2648 .maxlen = sizeof(int),
2649 .mode = 0644,
2650 .proc_handler = proc_dointvec,
2651 },
2652 {
2653 .procname = "retrans_time",
2654 .maxlen = sizeof(int),
2655 .mode = 0644,
2656 .proc_handler = proc_dointvec_userhz_jiffies,
2657 },
2658 {
2659 .procname = "base_reachable_time",
2660 .maxlen = sizeof(int),
2661 .mode = 0644,
2662 .proc_handler = proc_dointvec_jiffies,
2663 },
2664 {
2665 .procname = "delay_first_probe_time",
2666 .maxlen = sizeof(int),
2667 .mode = 0644,
2668 .proc_handler = proc_dointvec_jiffies,
2669 },
2670 {
2671 .procname = "gc_stale_time",
2672 .maxlen = sizeof(int),
2673 .mode = 0644,
2674 .proc_handler = proc_dointvec_jiffies,
2675 },
2676 {
2677 .procname = "unres_qlen",
2678 .maxlen = sizeof(int),
2679 .mode = 0644,
2680 .proc_handler = proc_dointvec,
2681 },
2682 {
2683 .procname = "proxy_qlen",
2684 .maxlen = sizeof(int),
2685 .mode = 0644,
2686 .proc_handler = proc_dointvec,
2687 },
2688 {
2689 .procname = "anycast_delay",
2690 .maxlen = sizeof(int),
2691 .mode = 0644,
2692 .proc_handler = proc_dointvec_userhz_jiffies,
2693 },
2694 {
2695 .procname = "proxy_delay",
2696 .maxlen = sizeof(int),
2697 .mode = 0644,
2698 .proc_handler = proc_dointvec_userhz_jiffies,
2699 },
2700 {
2701 .procname = "locktime",
2702 .maxlen = sizeof(int),
2703 .mode = 0644,
2704 .proc_handler = proc_dointvec_userhz_jiffies,
2705 },
2706 {
2707 .procname = "retrans_time_ms",
2708 .maxlen = sizeof(int),
2709 .mode = 0644,
2710 .proc_handler = proc_dointvec_ms_jiffies,
2711 },
2712 {
2713 .procname = "base_reachable_time_ms",
2714 .maxlen = sizeof(int),
2715 .mode = 0644,
2716 .proc_handler = proc_dointvec_ms_jiffies,
2717 },
2718 {
2719 .procname = "gc_interval",
2720 .maxlen = sizeof(int),
2721 .mode = 0644,
2722 .proc_handler = proc_dointvec_jiffies,
2723 },
2724 {
2725 .procname = "gc_thresh1",
2726 .maxlen = sizeof(int),
2727 .mode = 0644,
2728 .proc_handler = proc_dointvec,
2729 },
2730 {
2731 .procname = "gc_thresh2",
2732 .maxlen = sizeof(int),
2733 .mode = 0644,
2734 .proc_handler = proc_dointvec,
2735 },
2736 {
2737 .procname = "gc_thresh3",
2738 .maxlen = sizeof(int),
2739 .mode = 0644,
2740 .proc_handler = proc_dointvec,
2741 },
2742 {},
2743 },
2744};
2745
2746int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2747 char *p_name, proc_handler *handler)
2748{
2749 struct neigh_sysctl_table *t;
2750 const char *dev_name_source = NULL;
2751
2752#define NEIGH_CTL_PATH_ROOT 0
2753#define NEIGH_CTL_PATH_PROTO 1
2754#define NEIGH_CTL_PATH_NEIGH 2
2755#define NEIGH_CTL_PATH_DEV 3
2756
2757 struct ctl_path neigh_path[] = {
2758 { .procname = "net", },
2759 { .procname = "proto", },
2760 { .procname = "neigh", },
2761 { .procname = "default", },
2762 { },
2763 };
2764
2765 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2766 if (!t)
2767 goto err;
2768
2769 t->neigh_vars[0].data = &p->mcast_probes;
2770 t->neigh_vars[1].data = &p->ucast_probes;
2771 t->neigh_vars[2].data = &p->app_probes;
2772 t->neigh_vars[3].data = &p->retrans_time;
2773 t->neigh_vars[4].data = &p->base_reachable_time;
2774 t->neigh_vars[5].data = &p->delay_probe_time;
2775 t->neigh_vars[6].data = &p->gc_staletime;
2776 t->neigh_vars[7].data = &p->queue_len;
2777 t->neigh_vars[8].data = &p->proxy_qlen;
2778 t->neigh_vars[9].data = &p->anycast_delay;
2779 t->neigh_vars[10].data = &p->proxy_delay;
2780 t->neigh_vars[11].data = &p->locktime;
2781 t->neigh_vars[12].data = &p->retrans_time;
2782 t->neigh_vars[13].data = &p->base_reachable_time;
2783
2784 if (dev) {
2785 dev_name_source = dev->name;
2786 /* Terminate the table early */
2787 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2788 } else {
2789 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2790 t->neigh_vars[14].data = (int *)(p + 1);
2791 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2792 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2793 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2794 }
2795
2796
2797 if (handler) {
2798 /* RetransTime */
2799 t->neigh_vars[3].proc_handler = handler;
2800 t->neigh_vars[3].extra1 = dev;
2801 /* ReachableTime */
2802 t->neigh_vars[4].proc_handler = handler;
2803 t->neigh_vars[4].extra1 = dev;
2804 /* RetransTime (in milliseconds)*/
2805 t->neigh_vars[12].proc_handler = handler;
2806 t->neigh_vars[12].extra1 = dev;
2807 /* ReachableTime (in milliseconds) */
2808 t->neigh_vars[13].proc_handler = handler;
2809 t->neigh_vars[13].extra1 = dev;
2810 }
2811
2812 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2813 if (!t->dev_name)
2814 goto free;
2815
2816 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2817 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2818
2819 t->sysctl_header =
2820 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2821 if (!t->sysctl_header)
2822 goto free_procname;
2823
2824 p->sysctl_table = t;
2825 return 0;
2826
2827free_procname:
2828 kfree(t->dev_name);
2829free:
2830 kfree(t);
2831err:
2832 return -ENOBUFS;
2833}
2834EXPORT_SYMBOL(neigh_sysctl_register);
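/*
 * Usage sketch (illustrative): a protocol registers one table per
 * neigh_parms instance, which shows up as
 * /proc/sys/net/<proto>/neigh/<dev or "default">/.  Passing a NULL
 * handler keeps the stock jiffies conversions; IPv6 passes its own
 * handler so the *_ms variants stay coherent.  An IPv4-style caller
 * might do (error handling elided):
 *
 *	if (neigh_sysctl_register(dev, p, "ipv4", NULL))
 *		pr_err("neigh sysctl registration failed for %s\n",
 *		       dev ? dev->name : "default");
 */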
2835
2836void neigh_sysctl_unregister(struct neigh_parms *p)
2837{
2838 if (p->sysctl_table) {
2839 struct neigh_sysctl_table *t = p->sysctl_table;
2840 p->sysctl_table = NULL;
2841 unregister_sysctl_table(t->sysctl_header);
2842 kfree(t->dev_name);
2843 kfree(t);
2844 }
2845}
2846EXPORT_SYMBOL(neigh_sysctl_unregister);
2847
2848#endif /* CONFIG_SYSCTL */
2849
2850static int __init neigh_init(void)
2851{
2852 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
2853 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
2854 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
2855
2856 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
2857 NULL);
2858 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
2859
2860 return 0;
2861}
2862
2863subsys_initcall(neigh_init);
2864
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Generic address resolution entity
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * Fixes:
10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
11 * Harald Welte Add neighbour cache statistics like rtstat
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/slab.h>
17#include <linux/kmemleak.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/socket.h>
22#include <linux/netdevice.h>
23#include <linux/proc_fs.h>
24#ifdef CONFIG_SYSCTL
25#include <linux/sysctl.h>
26#endif
27#include <linux/times.h>
28#include <net/net_namespace.h>
29#include <net/neighbour.h>
30#include <net/arp.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/netevent.h>
34#include <net/netlink.h>
35#include <linux/rtnetlink.h>
36#include <linux/random.h>
37#include <linux/string.h>
38#include <linux/log2.h>
39#include <linux/inetdevice.h>
40#include <net/addrconf.h>
41
42#include <trace/events/neigh.h>
43
44#define NEIGH_DEBUG 1
45#define neigh_dbg(level, fmt, ...) \
46do { \
47 if (level <= NEIGH_DEBUG) \
48 pr_debug(fmt, ##__VA_ARGS__); \
49} while (0)
50
51#define PNEIGH_HASHMASK 0xF
52
53static void neigh_timer_handler(struct timer_list *t);
54static void __neigh_notify(struct neighbour *n, int type, int flags,
55 u32 pid);
56static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
57static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 struct net_device *dev);
59
60#ifdef CONFIG_PROC_FS
61static const struct seq_operations neigh_stat_seq_ops;
62#endif
63
64/*
65 Neighbour hash table buckets are protected with rwlock tbl->lock.
66
67 - All the scans/updates to hash buckets MUST be made under this lock.
68 - NOTHING clever should be made under this lock: no callbacks
69 to protocol backends, no attempts to send something to network.
70 It will result in deadlocks, if backend/driver wants to use neighbour
71 cache.
72 - If the entry requires some non-trivial actions, increase
73 its reference count and release table lock.
74
75 Neighbour entries are protected:
76 - with reference count.
77 - with rwlock neigh->lock
78
79 Reference count prevents destruction.
80
81 neigh->lock mainly serializes ll address data and its validity state.
82 However, the same lock is used to protect another entry fields:
83 - timer
84 - resolution queue
85
86 Again, nothing clever shall be made under neigh->lock,
87 the most complicated procedure, which we allow is dev->hard_header.
88 It is supposed, that dev->hard_header is simplistic and does
89 not make callbacks to neighbour tables.
90 */
91
92static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
93{
94 kfree_skb(skb);
95 return -ENETDOWN;
96}
97
98static void neigh_cleanup_and_release(struct neighbour *neigh)
99{
100 trace_neigh_cleanup_and_release(neigh, 0);
101 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
102 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
103 neigh_release(neigh);
104}
105
106/*
107 * It is random distribution in the interval (1/2)*base...(3/2)*base.
108 * It corresponds to default IPv6 settings and is not overridable,
109 * because it is really reasonable choice.
110 */
111
112unsigned long neigh_rand_reach_time(unsigned long base)
113{
114 return base ? get_random_u32_below(base) + (base >> 1) : 0;
115}
116EXPORT_SYMBOL(neigh_rand_reach_time);
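/*
 * Example: with base = 30 * HZ (a typical base_reachable_time),
 * get_random_u32_below(base) yields a value in [0, 30s) and adding
 * base >> 1 shifts it to [15s, 45s), i.e. a uniform pick from
 * (1/2)*base .. (3/2)*base as described above.
 */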
117
118static void neigh_mark_dead(struct neighbour *n)
119{
120 n->dead = 1;
121 if (!list_empty(&n->gc_list)) {
122 list_del_init(&n->gc_list);
123 atomic_dec(&n->tbl->gc_entries);
124 }
125 if (!list_empty(&n->managed_list))
126 list_del_init(&n->managed_list);
127}
128
129static void neigh_update_gc_list(struct neighbour *n)
130{
131 bool on_gc_list, exempt_from_gc;
132
133 write_lock_bh(&n->tbl->lock);
134 write_lock(&n->lock);
135 if (n->dead)
136 goto out;
137
138 /* remove from the gc list if new state is permanent or if neighbor
139 * is externally learned; otherwise entry should be on the gc list
140 */
141 exempt_from_gc = n->nud_state & NUD_PERMANENT ||
142 n->flags & NTF_EXT_LEARNED;
143 on_gc_list = !list_empty(&n->gc_list);
144
145 if (exempt_from_gc && on_gc_list) {
146 list_del_init(&n->gc_list);
147 atomic_dec(&n->tbl->gc_entries);
148 } else if (!exempt_from_gc && !on_gc_list) {
149 /* add entries to the tail; cleaning removes from the front */
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
152 }
153out:
154 write_unlock(&n->lock);
155 write_unlock_bh(&n->tbl->lock);
156}
157
158static void neigh_update_managed_list(struct neighbour *n)
159{
160 bool on_managed_list, add_to_managed;
161
162 write_lock_bh(&n->tbl->lock);
163 write_lock(&n->lock);
164 if (n->dead)
165 goto out;
166
167 add_to_managed = n->flags & NTF_MANAGED;
168 on_managed_list = !list_empty(&n->managed_list);
169
170 if (!add_to_managed && on_managed_list)
171 list_del_init(&n->managed_list);
172 else if (add_to_managed && !on_managed_list)
173 list_add_tail(&n->managed_list, &n->tbl->managed_list);
174out:
175 write_unlock(&n->lock);
176 write_unlock_bh(&n->tbl->lock);
177}
178
179static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
180 bool *gc_update, bool *managed_update)
181{
182 u32 ndm_flags, old_flags = neigh->flags;
183
184 if (!(flags & NEIGH_UPDATE_F_ADMIN))
185 return;
186
187 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
188 ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
189
190 if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
191 if (ndm_flags & NTF_EXT_LEARNED)
192 neigh->flags |= NTF_EXT_LEARNED;
193 else
194 neigh->flags &= ~NTF_EXT_LEARNED;
195 *notify = 1;
196 *gc_update = true;
197 }
198 if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
199 if (ndm_flags & NTF_MANAGED)
200 neigh->flags |= NTF_MANAGED;
201 else
202 neigh->flags &= ~NTF_MANAGED;
203 *notify = 1;
204 *managed_update = true;
205 }
206}
207
208static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
209 struct neigh_table *tbl)
210{
211 bool retval = false;
212
213 write_lock(&n->lock);
214 if (refcount_read(&n->refcnt) == 1) {
215 struct neighbour *neigh;
216
217 neigh = rcu_dereference_protected(n->next,
218 lockdep_is_held(&tbl->lock));
219 rcu_assign_pointer(*np, neigh);
220 neigh_mark_dead(n);
221 retval = true;
222 }
223 write_unlock(&n->lock);
224 if (retval)
225 neigh_cleanup_and_release(n);
226 return retval;
227}
228
229bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
230{
231 struct neigh_hash_table *nht;
232 void *pkey = ndel->primary_key;
233 u32 hash_val;
234 struct neighbour *n;
235 struct neighbour __rcu **np;
236
237 nht = rcu_dereference_protected(tbl->nht,
238 lockdep_is_held(&tbl->lock));
239 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
240 hash_val = hash_val >> (32 - nht->hash_shift);
241
242 np = &nht->hash_buckets[hash_val];
243 while ((n = rcu_dereference_protected(*np,
244 lockdep_is_held(&tbl->lock)))) {
245 if (n == ndel)
246 return neigh_del(n, np, tbl);
247 np = &n->next;
248 }
249 return false;
250}
251
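/*
 * Forced GC runs synchronously from neigh_alloc() once the number of
 * GC-eligible entries crosses gc_thresh3, or gc_thresh2 when at least
 * 5 seconds have passed since the last flush.  It removes up to
 * (gc_entries - gc_thresh2) unreferenced entries that are FAILED,
 * NOARP, keyed on a multicast address, or not updated within the last
 * 5 seconds.
 */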
252static int neigh_forced_gc(struct neigh_table *tbl)
253{
254 int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
255 unsigned long tref = jiffies - 5 * HZ;
256 struct neighbour *n, *tmp;
257 int shrunk = 0;
258
259 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
260
261 write_lock_bh(&tbl->lock);
262
263 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
264 if (refcount_read(&n->refcnt) == 1) {
265 bool remove = false;
266
267 write_lock(&n->lock);
268 if ((n->nud_state == NUD_FAILED) ||
269 (n->nud_state == NUD_NOARP) ||
270 (tbl->is_multicast &&
271 tbl->is_multicast(n->primary_key)) ||
272 !time_in_range(n->updated, tref, jiffies))
273 remove = true;
274 write_unlock(&n->lock);
275
276 if (remove && neigh_remove_one(n, tbl))
277 shrunk++;
278 if (shrunk >= max_clean)
279 break;
280 }
281 }
282
283 tbl->last_flush = jiffies;
284
285 write_unlock_bh(&tbl->lock);
286
287 return shrunk;
288}
289
290static void neigh_add_timer(struct neighbour *n, unsigned long when)
291{
292 /* Keep a safe distance from the jiffies - LONG_MAX wrap point while
293 * the timer is running in DELAY/PROBE state, but still report large
294 * times in the past to user space.
295 */
296 unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
297
298 neigh_hold(n);
299 if (!time_in_range(n->confirmed, mint, jiffies))
300 n->confirmed = mint;
301 if (time_before(n->used, n->confirmed))
302 n->used = n->confirmed;
303 if (unlikely(mod_timer(&n->timer, when))) {
304 printk("NEIGH: BUG, double timer add, state is %x\n",
305 n->nud_state);
306 dump_stack();
307 }
308}
309
310static int neigh_del_timer(struct neighbour *n)
311{
312 if ((n->nud_state & NUD_IN_TIMER) &&
313 del_timer(&n->timer)) {
314 neigh_release(n);
315 return 1;
316 }
317 return 0;
318}
319
320static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
321 int family)
322{
323 switch (family) {
324 case AF_INET:
325 return __in_dev_arp_parms_get_rcu(dev);
326 case AF_INET6:
327 return __in6_dev_nd_parms_get_rcu(dev);
328 }
329 return NULL;
330}
331
332static void neigh_parms_qlen_dec(struct net_device *dev, int family)
333{
334 struct neigh_parms *p;
335
336 rcu_read_lock();
337 p = neigh_get_dev_parms_rcu(dev, family);
338 if (p)
339 p->qlen--;
340 rcu_read_unlock();
341}
342
343static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
344 int family)
345{
346 struct sk_buff_head tmp;
347 unsigned long flags;
348 struct sk_buff *skb;
349
350 skb_queue_head_init(&tmp);
351 spin_lock_irqsave(&list->lock, flags);
352 skb = skb_peek(list);
353 while (skb != NULL) {
354 struct sk_buff *skb_next = skb_peek_next(skb, list);
355 struct net_device *dev = skb->dev;
356
357 if (net == NULL || net_eq(dev_net(dev), net)) {
358 neigh_parms_qlen_dec(dev, family);
359 __skb_unlink(skb, list);
360 __skb_queue_tail(&tmp, skb);
361 }
362 skb = skb_next;
363 }
364 spin_unlock_irqrestore(&list->lock, flags);
365
366 while ((skb = __skb_dequeue(&tmp))) {
367 dev_put(skb->dev);
368 kfree_skb(skb);
369 }
370}
371
372static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
373 bool skip_perm)
374{
375 int i;
376 struct neigh_hash_table *nht;
377
378 nht = rcu_dereference_protected(tbl->nht,
379 lockdep_is_held(&tbl->lock));
380
381 for (i = 0; i < (1 << nht->hash_shift); i++) {
382 struct neighbour *n;
383 struct neighbour __rcu **np = &nht->hash_buckets[i];
384
385 while ((n = rcu_dereference_protected(*np,
386 lockdep_is_held(&tbl->lock))) != NULL) {
387 if (dev && n->dev != dev) {
388 np = &n->next;
389 continue;
390 }
391 if (skip_perm && n->nud_state & NUD_PERMANENT) {
392 np = &n->next;
393 continue;
394 }
395 rcu_assign_pointer(*np,
396 rcu_dereference_protected(n->next,
397 lockdep_is_held(&tbl->lock)));
398 write_lock(&n->lock);
399 neigh_del_timer(n);
400 neigh_mark_dead(n);
401 if (refcount_read(&n->refcnt) != 1) {
402 /* The most unpleasant situation:
403 we must destroy the neighbour entry,
404 but someone still uses it.
405
406 Destruction is delayed until the
407 last user releases the entry, but
408 we must stop timers etc. and move
409 it to a safe state now.
410 */
411 __skb_queue_purge(&n->arp_queue);
412 n->arp_queue_len_bytes = 0;
413 n->output = neigh_blackhole;
414 if (n->nud_state & NUD_VALID)
415 n->nud_state = NUD_NOARP;
416 else
417 n->nud_state = NUD_NONE;
418 neigh_dbg(2, "neigh %p is stray\n", n);
419 }
420 write_unlock(&n->lock);
421 neigh_cleanup_and_release(n);
422 }
423 }
424}
425
426void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
427{
428 write_lock_bh(&tbl->lock);
429 neigh_flush_dev(tbl, dev, false);
430 write_unlock_bh(&tbl->lock);
431}
432EXPORT_SYMBOL(neigh_changeaddr);
433
434static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
435 bool skip_perm)
436{
437 write_lock_bh(&tbl->lock);
438 neigh_flush_dev(tbl, dev, skip_perm);
439 pneigh_ifdown_and_unlock(tbl, dev);
440 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
441 tbl->family);
442 if (skb_queue_empty_lockless(&tbl->proxy_queue))
443 del_timer_sync(&tbl->proxy_timer);
444 return 0;
445}
446
447int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
448{
449 __neigh_ifdown(tbl, dev, true);
450 return 0;
451}
452EXPORT_SYMBOL(neigh_carrier_down);
453
454int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
455{
456 __neigh_ifdown(tbl, dev, false);
457 return 0;
458}
459EXPORT_SYMBOL(neigh_ifdown);
460
461static struct neighbour *neigh_alloc(struct neigh_table *tbl,
462 struct net_device *dev,
463 u32 flags, bool exempt_from_gc)
464{
465 struct neighbour *n = NULL;
466 unsigned long now = jiffies;
467 int entries;
468
469 if (exempt_from_gc)
470 goto do_alloc;
471
472 entries = atomic_inc_return(&tbl->gc_entries) - 1;
473 if (entries >= tbl->gc_thresh3 ||
474 (entries >= tbl->gc_thresh2 &&
475 time_after(now, tbl->last_flush + 5 * HZ))) {
476 if (!neigh_forced_gc(tbl) &&
477 entries >= tbl->gc_thresh3) {
478 net_info_ratelimited("%s: neighbor table overflow!\n",
479 tbl->id);
480 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
481 goto out_entries;
482 }
483 }
484
485do_alloc:
486 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
487 if (!n)
488 goto out_entries;
489
490 __skb_queue_head_init(&n->arp_queue);
491 rwlock_init(&n->lock);
492 seqlock_init(&n->ha_lock);
493 n->updated = n->used = now;
494 n->nud_state = NUD_NONE;
495 n->output = neigh_blackhole;
496 n->flags = flags;
497 seqlock_init(&n->hh.hh_lock);
498 n->parms = neigh_parms_clone(&tbl->parms);
499 timer_setup(&n->timer, neigh_timer_handler, 0);
500
501 NEIGH_CACHE_STAT_INC(tbl, allocs);
502 n->tbl = tbl;
503 refcount_set(&n->refcnt, 1);
504 n->dead = 1;
505 INIT_LIST_HEAD(&n->gc_list);
506 INIT_LIST_HEAD(&n->managed_list);
507
508 atomic_inc(&tbl->entries);
509out:
510 return n;
511
512out_entries:
513 if (!exempt_from_gc)
514 atomic_dec(&tbl->gc_entries);
515 goto out;
516}
517
518static void neigh_get_hash_rnd(u32 *x)
519{
520 *x = get_random_u32() | 1;
521}
522
523static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
524{
525 size_t size = (1 << shift) * sizeof(struct neighbour *);
526 struct neigh_hash_table *ret;
527 struct neighbour __rcu **buckets;
528 int i;
529
530 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
531 if (!ret)
532 return NULL;
533 if (size <= PAGE_SIZE) {
534 buckets = kzalloc(size, GFP_ATOMIC);
535 } else {
536 buckets = (struct neighbour __rcu **)
537 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
538 get_order(size));
539 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
540 }
541 if (!buckets) {
542 kfree(ret);
543 return NULL;
544 }
545 ret->hash_buckets = buckets;
546 ret->hash_shift = shift;
547 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
548 neigh_get_hash_rnd(&ret->hash_rnd[i]);
549 return ret;
550}
551
552static void neigh_hash_free_rcu(struct rcu_head *head)
553{
554 struct neigh_hash_table *nht = container_of(head,
555 struct neigh_hash_table,
556 rcu);
557 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
558 struct neighbour __rcu **buckets = nht->hash_buckets;
559
560 if (size <= PAGE_SIZE) {
561 kfree(buckets);
562 } else {
563 kmemleak_free(buckets);
564 free_pages((unsigned long)buckets, get_order(size));
565 }
566 kfree(nht);
567}
568
569static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
570 unsigned long new_shift)
571{
572 unsigned int i, hash;
573 struct neigh_hash_table *new_nht, *old_nht;
574
575 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
576
577 old_nht = rcu_dereference_protected(tbl->nht,
578 lockdep_is_held(&tbl->lock));
579 new_nht = neigh_hash_alloc(new_shift);
580 if (!new_nht)
581 return old_nht;
582
583 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
584 struct neighbour *n, *next;
585
586 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
587 lockdep_is_held(&tbl->lock));
588 n != NULL;
589 n = next) {
590 hash = tbl->hash(n->primary_key, n->dev,
591 new_nht->hash_rnd);
592
593 hash >>= (32 - new_nht->hash_shift);
594 next = rcu_dereference_protected(n->next,
595 lockdep_is_held(&tbl->lock));
596
597 rcu_assign_pointer(n->next,
598 rcu_dereference_protected(
599 new_nht->hash_buckets[hash],
600 lockdep_is_held(&tbl->lock)));
601 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
602 }
603 }
604
605 rcu_assign_pointer(tbl->nht, new_nht);
606 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
607 return new_nht;
608}
609
610struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
611 struct net_device *dev)
612{
613 struct neighbour *n;
614
615 NEIGH_CACHE_STAT_INC(tbl, lookups);
616
617 rcu_read_lock_bh();
618 n = __neigh_lookup_noref(tbl, pkey, dev);
619 if (n) {
620 if (!refcount_inc_not_zero(&n->refcnt))
621 n = NULL;
622 NEIGH_CACHE_STAT_INC(tbl, hits);
623 }
624
625 rcu_read_unlock_bh();
626 return n;
627}
628EXPORT_SYMBOL(neigh_lookup);
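/*
 * Usage sketch (illustrative): resolution paths usually look an entry
 * up first and fall back to creating one, e.g. for ARP (assuming "ip"
 * holds the __be32 next-hop address):
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *
 *	if (!n)
 *		n = neigh_create(&arp_tbl, &ip, dev);
 *	if (!IS_ERR(n)) {
 *		// ... use n ...
 *		neigh_release(n);	// both paths returned a reference
 *	}
 */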
629
630struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
631 const void *pkey)
632{
633 struct neighbour *n;
634 unsigned int key_len = tbl->key_len;
635 u32 hash_val;
636 struct neigh_hash_table *nht;
637
638 NEIGH_CACHE_STAT_INC(tbl, lookups);
639
640 rcu_read_lock_bh();
641 nht = rcu_dereference_bh(tbl->nht);
642 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
643
644 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
645 n != NULL;
646 n = rcu_dereference_bh(n->next)) {
647 if (!memcmp(n->primary_key, pkey, key_len) &&
648 net_eq(dev_net(n->dev), net)) {
649 if (!refcount_inc_not_zero(&n->refcnt))
650 n = NULL;
651 NEIGH_CACHE_STAT_INC(tbl, hits);
652 break;
653 }
654 }
655
656 rcu_read_unlock_bh();
657 return n;
658}
659EXPORT_SYMBOL(neigh_lookup_nodev);
660
661static struct neighbour *
662___neigh_create(struct neigh_table *tbl, const void *pkey,
663 struct net_device *dev, u32 flags,
664 bool exempt_from_gc, bool want_ref)
665{
666 u32 hash_val, key_len = tbl->key_len;
667 struct neighbour *n1, *rc, *n;
668 struct neigh_hash_table *nht;
669 int error;
670
671 n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
672 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
673 if (!n) {
674 rc = ERR_PTR(-ENOBUFS);
675 goto out;
676 }
677
678 memcpy(n->primary_key, pkey, key_len);
679 n->dev = dev;
680 netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
681
682 /* Protocol specific setup. */
683 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
684 rc = ERR_PTR(error);
685 goto out_neigh_release;
686 }
687
688 if (dev->netdev_ops->ndo_neigh_construct) {
689 error = dev->netdev_ops->ndo_neigh_construct(dev, n);
690 if (error < 0) {
691 rc = ERR_PTR(error);
692 goto out_neigh_release;
693 }
694 }
695
696 /* Device specific setup. */
697 if (n->parms->neigh_setup &&
698 (error = n->parms->neigh_setup(n)) < 0) {
699 rc = ERR_PTR(error);
700 goto out_neigh_release;
701 }
702
703 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
704
705 write_lock_bh(&tbl->lock);
706 nht = rcu_dereference_protected(tbl->nht,
707 lockdep_is_held(&tbl->lock));
708
709 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
710 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
711
712 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
713
714 if (n->parms->dead) {
715 rc = ERR_PTR(-EINVAL);
716 goto out_tbl_unlock;
717 }
718
719 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
720 lockdep_is_held(&tbl->lock));
721 n1 != NULL;
722 n1 = rcu_dereference_protected(n1->next,
723 lockdep_is_held(&tbl->lock))) {
724 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
725 if (want_ref)
726 neigh_hold(n1);
727 rc = n1;
728 goto out_tbl_unlock;
729 }
730 }
731
732 n->dead = 0;
733 if (!exempt_from_gc)
734 list_add_tail(&n->gc_list, &n->tbl->gc_list);
735 if (n->flags & NTF_MANAGED)
736 list_add_tail(&n->managed_list, &n->tbl->managed_list);
737 if (want_ref)
738 neigh_hold(n);
739 rcu_assign_pointer(n->next,
740 rcu_dereference_protected(nht->hash_buckets[hash_val],
741 lockdep_is_held(&tbl->lock)));
742 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
743 write_unlock_bh(&tbl->lock);
744 neigh_dbg(2, "neigh %p is created\n", n);
745 rc = n;
746out:
747 return rc;
748out_tbl_unlock:
749 write_unlock_bh(&tbl->lock);
750out_neigh_release:
751 if (!exempt_from_gc)
752 atomic_dec(&tbl->gc_entries);
753 neigh_release(n);
754 goto out;
755}
756
757struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
758 struct net_device *dev, bool want_ref)
759{
760 return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
761}
762EXPORT_SYMBOL(__neigh_create);
763
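/*
 * Proxy (pneigh) entries live in a small 16-bucket chained table.  The
 * hash below folds the last four bytes of the protocol key down to
 * PNEIGH_HASHMASK:  h ^= h >> 16;  h ^= h >> 8;  h ^= h >> 4;  h &= 0xF.
 */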
764static u32 pneigh_hash(const void *pkey, unsigned int key_len)
765{
766 u32 hash_val = *(u32 *)(pkey + key_len - 4);
767 hash_val ^= (hash_val >> 16);
768 hash_val ^= hash_val >> 8;
769 hash_val ^= hash_val >> 4;
770 hash_val &= PNEIGH_HASHMASK;
771 return hash_val;
772}
773
774static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
775 struct net *net,
776 const void *pkey,
777 unsigned int key_len,
778 struct net_device *dev)
779{
780 while (n) {
781 if (!memcmp(n->key, pkey, key_len) &&
782 net_eq(pneigh_net(n), net) &&
783 (n->dev == dev || !n->dev))
784 return n;
785 n = n->next;
786 }
787 return NULL;
788}
789
790struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
791 struct net *net, const void *pkey, struct net_device *dev)
792{
793 unsigned int key_len = tbl->key_len;
794 u32 hash_val = pneigh_hash(pkey, key_len);
795
796 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
797 net, pkey, key_len, dev);
798}
799EXPORT_SYMBOL_GPL(__pneigh_lookup);
800
801struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
802 struct net *net, const void *pkey,
803 struct net_device *dev, int creat)
804{
805 struct pneigh_entry *n;
806 unsigned int key_len = tbl->key_len;
807 u32 hash_val = pneigh_hash(pkey, key_len);
808
809 read_lock_bh(&tbl->lock);
810 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
811 net, pkey, key_len, dev);
812 read_unlock_bh(&tbl->lock);
813
814 if (n || !creat)
815 goto out;
816
817 ASSERT_RTNL();
818
819 n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
820 if (!n)
821 goto out;
822
823 write_pnet(&n->net, net);
824 memcpy(n->key, pkey, key_len);
825 n->dev = dev;
826 netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
827
828 if (tbl->pconstructor && tbl->pconstructor(n)) {
829 netdev_put(dev, &n->dev_tracker);
830 kfree(n);
831 n = NULL;
832 goto out;
833 }
834
835 write_lock_bh(&tbl->lock);
836 n->next = tbl->phash_buckets[hash_val];
837 tbl->phash_buckets[hash_val] = n;
838 write_unlock_bh(&tbl->lock);
839out:
840 return n;
841}
842EXPORT_SYMBOL(pneigh_lookup);
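/*
 * Usage sketch (illustrative): proxy entries are created with
 * creat != 0 under RTNL (e.g. when handling "ip neigh add proxy ..."),
 * while lookup-only callers either use pneigh_lookup(..., 0), which
 * takes tbl->lock itself, or __pneigh_lookup() when they already hold
 * it:
 *
 *	// slow path, RTNL held, may allocate:
 *	pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 1);
 *
 *	// lookup only, tbl->lock taken internally:
 *	pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 0);
 */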
843
844
845int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
846 struct net_device *dev)
847{
848 struct pneigh_entry *n, **np;
849 unsigned int key_len = tbl->key_len;
850 u32 hash_val = pneigh_hash(pkey, key_len);
851
852 write_lock_bh(&tbl->lock);
853 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
854 np = &n->next) {
855 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
856 net_eq(pneigh_net(n), net)) {
857 *np = n->next;
858 write_unlock_bh(&tbl->lock);
859 if (tbl->pdestructor)
860 tbl->pdestructor(n);
861 netdev_put(n->dev, &n->dev_tracker);
862 kfree(n);
863 return 0;
864 }
865 }
866 write_unlock_bh(&tbl->lock);
867 return -ENOENT;
868}
869
870static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
871 struct net_device *dev)
872{
873 struct pneigh_entry *n, **np, *freelist = NULL;
874 u32 h;
875
876 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
877 np = &tbl->phash_buckets[h];
878 while ((n = *np) != NULL) {
879 if (!dev || n->dev == dev) {
880 *np = n->next;
881 n->next = freelist;
882 freelist = n;
883 continue;
884 }
885 np = &n->next;
886 }
887 }
888 write_unlock_bh(&tbl->lock);
889 while ((n = freelist)) {
890 freelist = n->next;
891 n->next = NULL;
892 if (tbl->pdestructor)
893 tbl->pdestructor(n);
894 netdev_put(n->dev, &n->dev_tracker);
895 kfree(n);
896 }
897 return -ENOENT;
898}
899
900static void neigh_parms_destroy(struct neigh_parms *parms);
901
902static inline void neigh_parms_put(struct neigh_parms *parms)
903{
904 if (refcount_dec_and_test(&parms->refcnt))
905 neigh_parms_destroy(parms);
906}
907
908/*
909 * The neighbour must already have been removed
910 * from the table.
911 */
912void neigh_destroy(struct neighbour *neigh)
913{
914 struct net_device *dev = neigh->dev;
915
916 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
917
918 if (!neigh->dead) {
919 pr_warn("Destroying alive neighbour %p\n", neigh);
920 dump_stack();
921 return;
922 }
923
924 if (neigh_del_timer(neigh))
925 pr_warn("Impossible event\n");
926
927 write_lock_bh(&neigh->lock);
928 __skb_queue_purge(&neigh->arp_queue);
929 write_unlock_bh(&neigh->lock);
930 neigh->arp_queue_len_bytes = 0;
931
932 if (dev->netdev_ops->ndo_neigh_destroy)
933 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
934
935 netdev_put(dev, &neigh->dev_tracker);
936 neigh_parms_put(neigh->parms);
937
938 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
939
940 atomic_dec(&neigh->tbl->entries);
941 kfree_rcu(neigh, rcu);
942}
943EXPORT_SYMBOL(neigh_destroy);
944
945/* Neighbour state is suspicious;
946 disable fast path.
947
948 Called with write_locked neigh.
949 */
950static void neigh_suspect(struct neighbour *neigh)
951{
952 neigh_dbg(2, "neigh %p is suspected\n", neigh);
953
954 neigh->output = neigh->ops->output;
955}
956
957/* Neighbour state is OK;
958 enable fast path.
959
960 Called with write_locked neigh.
961 */
962static void neigh_connect(struct neighbour *neigh)
963{
964 neigh_dbg(2, "neigh %p is connected\n", neigh);
965
966 neigh->output = neigh->ops->connected_output;
967}
968
969static void neigh_periodic_work(struct work_struct *work)
970{
971 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
972 struct neighbour *n;
973 struct neighbour __rcu **np;
974 unsigned int i;
975 struct neigh_hash_table *nht;
976
977 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
978
979 write_lock_bh(&tbl->lock);
980 nht = rcu_dereference_protected(tbl->nht,
981 lockdep_is_held(&tbl->lock));
982
983 /*
984 * periodically recompute ReachableTime from random function
985 */
986
987 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
988 struct neigh_parms *p;
989 tbl->last_rand = jiffies;
990 list_for_each_entry(p, &tbl->parms_list, list)
991 p->reachable_time =
992 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
993 }
994
995 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
996 goto out;
997
998 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
999 np = &nht->hash_buckets[i];
1000
1001 while ((n = rcu_dereference_protected(*np,
1002 lockdep_is_held(&tbl->lock))) != NULL) {
1003 unsigned int state;
1004
1005 write_lock(&n->lock);
1006
1007 state = n->nud_state;
1008 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
1009 (n->flags & NTF_EXT_LEARNED)) {
1010 write_unlock(&n->lock);
1011 goto next_elt;
1012 }
1013
1014 if (time_before(n->used, n->confirmed) &&
1015 time_is_before_eq_jiffies(n->confirmed))
1016 n->used = n->confirmed;
1017
1018 if (refcount_read(&n->refcnt) == 1 &&
1019 (state == NUD_FAILED ||
1020 !time_in_range_open(jiffies, n->used,
1021 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
1022 *np = n->next;
1023 neigh_mark_dead(n);
1024 write_unlock(&n->lock);
1025 neigh_cleanup_and_release(n);
1026 continue;
1027 }
1028 write_unlock(&n->lock);
1029
1030next_elt:
1031 np = &n->next;
1032 }
1033 /*
1034 * It's fine to release lock here, even if hash table
1035 * grows while we are preempted.
1036 */
1037 write_unlock_bh(&tbl->lock);
1038 cond_resched();
1039 write_lock_bh(&tbl->lock);
1040 nht = rcu_dereference_protected(tbl->nht,
1041 lockdep_is_held(&tbl->lock));
1042 }
1043out:
1044 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
1045 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
1046 * BASE_REACHABLE_TIME.
1047 */
1048 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1049 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
1050 write_unlock_bh(&tbl->lock);
1051}
1052
1053static __inline__ int neigh_max_probes(struct neighbour *n)
1054{
1055 struct neigh_parms *p = n->parms;
1056 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1057 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1058 NEIGH_VAR(p, MCAST_PROBES));
1059}
1060
1061static void neigh_invalidate(struct neighbour *neigh)
1062 __releases(neigh->lock)
1063 __acquires(neigh->lock)
1064{
1065 struct sk_buff *skb;
1066
1067 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1068 neigh_dbg(2, "neigh %p is failed\n", neigh);
1069 neigh->updated = jiffies;
1070
1071 /* This is a delicate spot: report_unreachable is a complicated
1072 routine and may even hit this same neighbour entry again!
1073
1074 So we try to be careful and avoid an endless loop. --ANK
1075 */
1076 while (neigh->nud_state == NUD_FAILED &&
1077 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1078 write_unlock(&neigh->lock);
1079 neigh->ops->error_report(neigh, skb);
1080 write_lock(&neigh->lock);
1081 }
1082 __skb_queue_purge(&neigh->arp_queue);
1083 neigh->arp_queue_len_bytes = 0;
1084}
1085
1086static void neigh_probe(struct neighbour *neigh)
1087 __releases(neigh->lock)
1088{
1089 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1090 /* keep skb alive even if arp_queue overflows */
1091 if (skb)
1092 skb = skb_clone(skb, GFP_ATOMIC);
1093 write_unlock(&neigh->lock);
1094 if (neigh->ops->solicit)
1095 neigh->ops->solicit(neigh, skb);
1096 atomic_inc(&neigh->probes);
1097 consume_skb(skb);
1098}
1099
1100/* Called when a timer expires for a neighbour entry. */
1101
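/*
 * Summary of the transitions driven from here: REACHABLE ages into
 * DELAY or STALE once its confirmation is old, DELAY either returns to
 * REACHABLE on a fresh confirmation or moves on to PROBE, and
 * INCOMPLETE/PROBE entries that exhaust neigh_max_probes() are marked
 * FAILED and their queued packets dropped via neigh_invalidate().
 */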
1102static void neigh_timer_handler(struct timer_list *t)
1103{
1104 unsigned long now, next;
1105 struct neighbour *neigh = from_timer(neigh, t, timer);
1106 unsigned int state;
1107 int notify = 0;
1108
1109 write_lock(&neigh->lock);
1110
1111 state = neigh->nud_state;
1112 now = jiffies;
1113 next = now + HZ;
1114
1115 if (!(state & NUD_IN_TIMER))
1116 goto out;
1117
1118 if (state & NUD_REACHABLE) {
1119 if (time_before_eq(now,
1120 neigh->confirmed + neigh->parms->reachable_time)) {
1121 neigh_dbg(2, "neigh %p is still alive\n", neigh);
1122 next = neigh->confirmed + neigh->parms->reachable_time;
1123 } else if (time_before_eq(now,
1124 neigh->used +
1125 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1126 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1127 neigh->nud_state = NUD_DELAY;
1128 neigh->updated = jiffies;
1129 neigh_suspect(neigh);
1130 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1131 } else {
1132 neigh_dbg(2, "neigh %p is suspected\n", neigh);
1133 neigh->nud_state = NUD_STALE;
1134 neigh->updated = jiffies;
1135 neigh_suspect(neigh);
1136 notify = 1;
1137 }
1138 } else if (state & NUD_DELAY) {
1139 if (time_before_eq(now,
1140 neigh->confirmed +
1141 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1142 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1143 neigh->nud_state = NUD_REACHABLE;
1144 neigh->updated = jiffies;
1145 neigh_connect(neigh);
1146 notify = 1;
1147 next = neigh->confirmed + neigh->parms->reachable_time;
1148 } else {
1149 neigh_dbg(2, "neigh %p is probed\n", neigh);
1150 neigh->nud_state = NUD_PROBE;
1151 neigh->updated = jiffies;
1152 atomic_set(&neigh->probes, 0);
1153 notify = 1;
1154 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1155 HZ/100);
1156 }
1157 } else {
1158 /* NUD_PROBE|NUD_INCOMPLETE */
1159 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1160 }
1161
1162 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1163 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1164 neigh->nud_state = NUD_FAILED;
1165 notify = 1;
1166 neigh_invalidate(neigh);
1167 goto out;
1168 }
1169
1170 if (neigh->nud_state & NUD_IN_TIMER) {
1171 if (time_before(next, jiffies + HZ/100))
1172 next = jiffies + HZ/100;
1173 if (!mod_timer(&neigh->timer, next))
1174 neigh_hold(neigh);
1175 }
1176 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1177 neigh_probe(neigh);
1178 } else {
1179out:
1180 write_unlock(&neigh->lock);
1181 }
1182
1183 if (notify)
1184 neigh_update_notify(neigh, 0);
1185
1186 trace_neigh_timer_handler(neigh, 0);
1187
1188 neigh_release(neigh);
1189}
1190
1191int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1192 const bool immediate_ok)
1193{
1194 int rc;
1195 bool immediate_probe = false;
1196
1197 write_lock_bh(&neigh->lock);
1198
1199 rc = 0;
1200 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1201 goto out_unlock_bh;
1202 if (neigh->dead)
1203 goto out_dead;
1204
1205 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1206 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1207 NEIGH_VAR(neigh->parms, APP_PROBES)) {
1208 unsigned long next, now = jiffies;
1209
1210 atomic_set(&neigh->probes,
1211 NEIGH_VAR(neigh->parms, UCAST_PROBES));
1212 neigh_del_timer(neigh);
1213 neigh->nud_state = NUD_INCOMPLETE;
1214 neigh->updated = now;
1215 if (!immediate_ok) {
1216 next = now + 1;
1217 } else {
1218 immediate_probe = true;
1219 next = now + max(NEIGH_VAR(neigh->parms,
1220 RETRANS_TIME),
1221 HZ / 100);
1222 }
1223 neigh_add_timer(neigh, next);
1224 } else {
1225 neigh->nud_state = NUD_FAILED;
1226 neigh->updated = jiffies;
1227 write_unlock_bh(&neigh->lock);
1228
1229 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1230 return 1;
1231 }
1232 } else if (neigh->nud_state & NUD_STALE) {
1233 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1234 neigh_del_timer(neigh);
1235 neigh->nud_state = NUD_DELAY;
1236 neigh->updated = jiffies;
1237 neigh_add_timer(neigh, jiffies +
1238 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1239 }
1240
1241 if (neigh->nud_state == NUD_INCOMPLETE) {
1242 if (skb) {
1243 while (neigh->arp_queue_len_bytes + skb->truesize >
1244 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1245 struct sk_buff *buff;
1246
1247 buff = __skb_dequeue(&neigh->arp_queue);
1248 if (!buff)
1249 break;
1250 neigh->arp_queue_len_bytes -= buff->truesize;
1251 kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1252 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1253 }
1254 skb_dst_force(skb);
1255 __skb_queue_tail(&neigh->arp_queue, skb);
1256 neigh->arp_queue_len_bytes += skb->truesize;
1257 }
1258 rc = 1;
1259 }
1260out_unlock_bh:
1261 if (immediate_probe)
1262 neigh_probe(neigh);
1263 else
1264 write_unlock(&neigh->lock);
1265 local_bh_enable();
1266 trace_neigh_event_send_done(neigh, rc);
1267 return rc;
1268
1269out_dead:
1270 if (neigh->nud_state & NUD_STALE)
1271 goto out_unlock_bh;
1272 write_unlock_bh(&neigh->lock);
1273 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1274 trace_neigh_event_send_dead(neigh, 1);
1275 return 1;
1276}
1277EXPORT_SYMBOL(__neigh_event_send);
1278
1279static void neigh_update_hhs(struct neighbour *neigh)
1280{
1281 struct hh_cache *hh;
1282 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1283 = NULL;
1284
1285 if (neigh->dev->header_ops)
1286 update = neigh->dev->header_ops->cache_update;
1287
1288 if (update) {
1289 hh = &neigh->hh;
1290 if (READ_ONCE(hh->hh_len)) {
1291 write_seqlock_bh(&hh->hh_lock);
1292 update(hh, neigh->dev, neigh->ha);
1293 write_sequnlock_bh(&hh->hh_lock);
1294 }
1295 }
1296}
1297
1298/* Generic update routine.
1299 -- lladdr is the new lladdr, or NULL if none is supplied.
1300 -- new is the new state.
1301 -- flags
1302 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1303 if it differs.
1304 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1305 lladdr instead of overriding it
1306 if it differs.
1307 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1308 NEIGH_UPDATE_F_USE means that the entry is user triggered.
1309 NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed.
1310 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1311 NTF_ROUTER flag.
1312 NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to be
1313 a router.
1314
1315 The caller MUST hold a reference count on the entry.
1316 */
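/*
 * Example (illustrative): the netlink RTM_NEWNEIGH path effectively
 * performs an administrative override such as
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
 *		     pid);
 *
 * which replaces a differing cached lladdr and is allowed to touch
 * NUD_NOARP/NUD_PERMANENT entries that non-admin updates leave alone.
 */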
1317static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1318 u8 new, u32 flags, u32 nlmsg_pid,
1319 struct netlink_ext_ack *extack)
1320{
1321 bool gc_update = false, managed_update = false;
1322 int update_isrouter = 0;
1323 struct net_device *dev;
1324 int err, notify = 0;
1325 u8 old;
1326
1327 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1328
1329 write_lock_bh(&neigh->lock);
1330
1331 dev = neigh->dev;
1332 old = neigh->nud_state;
1333 err = -EPERM;
1334
1335 if (neigh->dead) {
1336 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1337 new = old;
1338 goto out;
1339 }
1340 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1341 (old & (NUD_NOARP | NUD_PERMANENT)))
1342 goto out;
1343
1344 neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1345 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1346 new = old & ~NUD_PERMANENT;
1347 neigh->nud_state = new;
1348 err = 0;
1349 goto out;
1350 }
1351
1352 if (!(new & NUD_VALID)) {
1353 neigh_del_timer(neigh);
1354 if (old & NUD_CONNECTED)
1355 neigh_suspect(neigh);
1356 neigh->nud_state = new;
1357 err = 0;
1358 notify = old & NUD_VALID;
1359 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1360 (new & NUD_FAILED)) {
1361 neigh_invalidate(neigh);
1362 notify = 1;
1363 }
1364 goto out;
1365 }
1366
1367 /* Compare new lladdr with cached one */
1368 if (!dev->addr_len) {
1369 /* First case: device needs no address. */
1370 lladdr = neigh->ha;
1371 } else if (lladdr) {
1372 /* The second case: if something is already cached
1373 and a new address is proposed:
1374 - compare new & old
1375 - if they are different, check override flag
1376 */
1377 if ((old & NUD_VALID) &&
1378 !memcmp(lladdr, neigh->ha, dev->addr_len))
1379 lladdr = neigh->ha;
1380 } else {
1381 /* No address is supplied; if we know something,
1382 use it, otherwise discard the request.
1383 */
1384 err = -EINVAL;
1385 if (!(old & NUD_VALID)) {
1386 NL_SET_ERR_MSG(extack, "No link layer address given");
1387 goto out;
1388 }
1389 lladdr = neigh->ha;
1390 }
1391
1392 /* Update the neighbour's confirmed timestamp after we receive an
1393 * ARP packet, even if it doesn't change the IP-to-MAC binding.
1394 */
1395 if (new & NUD_CONNECTED)
1396 neigh->confirmed = jiffies;
1397
1398 /* If the entry was valid and the address has not changed,
1399 do not change the entry state when the new one is STALE.
1400 */
1401 err = 0;
1402 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1403 if (old & NUD_VALID) {
1404 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1405 update_isrouter = 0;
1406 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1407 (old & NUD_CONNECTED)) {
1408 lladdr = neigh->ha;
1409 new = NUD_STALE;
1410 } else
1411 goto out;
1412 } else {
1413 if (lladdr == neigh->ha && new == NUD_STALE &&
1414 !(flags & NEIGH_UPDATE_F_ADMIN))
1415 new = old;
1416 }
1417 }
1418
1419 /* Update the timestamp only once we know we will make a change to
1420 * the neighbour entry. Otherwise we risk moving the locktime window
1421 * with no-op updates and ignoring relevant ARP updates.
1422 */
1423 if (new != old || lladdr != neigh->ha)
1424 neigh->updated = jiffies;
1425
1426 if (new != old) {
1427 neigh_del_timer(neigh);
1428 if (new & NUD_PROBE)
1429 atomic_set(&neigh->probes, 0);
1430 if (new & NUD_IN_TIMER)
1431 neigh_add_timer(neigh, (jiffies +
1432 ((new & NUD_REACHABLE) ?
1433 neigh->parms->reachable_time :
1434 0)));
1435 neigh->nud_state = new;
1436 notify = 1;
1437 }
1438
1439 if (lladdr != neigh->ha) {
1440 write_seqlock(&neigh->ha_lock);
1441 memcpy(&neigh->ha, lladdr, dev->addr_len);
1442 write_sequnlock(&neigh->ha_lock);
1443 neigh_update_hhs(neigh);
1444 if (!(new & NUD_CONNECTED))
1445 neigh->confirmed = jiffies -
1446 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1447 notify = 1;
1448 }
1449 if (new == old)
1450 goto out;
1451 if (new & NUD_CONNECTED)
1452 neigh_connect(neigh);
1453 else
1454 neigh_suspect(neigh);
1455 if (!(old & NUD_VALID)) {
1456 struct sk_buff *skb;
1457
1458 /* Again: avoid an endless loop if something went wrong */
1459
1460 while (neigh->nud_state & NUD_VALID &&
1461 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1462 struct dst_entry *dst = skb_dst(skb);
1463 struct neighbour *n2, *n1 = neigh;
1464 write_unlock_bh(&neigh->lock);
1465
1466 rcu_read_lock();
1467
1468 /* Why not just use 'neigh' as-is? The problem is that
1469 * things such as shaper, eql, and sch_teql can end up
1470 * using different neigh objects to output the packet in
1471 * the output path. So what we need to do here is
1472 * re-look up the top-level neigh in the path so we can
1473 * reinject the packet there.
1474 */
1475 n2 = NULL;
1476 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1477 n2 = dst_neigh_lookup_skb(dst, skb);
1478 if (n2)
1479 n1 = n2;
1480 }
1481 n1->output(n1, skb);
1482 if (n2)
1483 neigh_release(n2);
1484 rcu_read_unlock();
1485
1486 write_lock_bh(&neigh->lock);
1487 }
1488 __skb_queue_purge(&neigh->arp_queue);
1489 neigh->arp_queue_len_bytes = 0;
1490 }
1491out:
1492 if (update_isrouter)
1493 neigh_update_is_router(neigh, flags, &notify);
1494 write_unlock_bh(&neigh->lock);
1495 if (((new ^ old) & NUD_PERMANENT) || gc_update)
1496 neigh_update_gc_list(neigh);
1497 if (managed_update)
1498 neigh_update_managed_list(neigh);
1499 if (notify)
1500 neigh_update_notify(neigh, nlmsg_pid);
1501 trace_neigh_update_done(neigh, err);
1502 return err;
1503}
1504
1505int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1506 u32 flags, u32 nlmsg_pid)
1507{
1508 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1509}
1510EXPORT_SYMBOL(neigh_update);
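
/*
 * Illustrative only: a protocol backend that has just learned a sender's
 * link-layer address typically reports it along the lines of
 *
 *	neigh_update(n, sender_ha, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_WEAK_OVERRIDE | NEIGH_UPDATE_F_OVERRIDE,
 *		     0);
 *
 * where "sender_ha" stands in for the address taken from the received
 * packet. The flag semantics are documented above __neigh_update().
 */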
1511
1512/* Update the neigh to listen temporarily for probe responses, even if it is
1513 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1514 */
1515void __neigh_set_probe_once(struct neighbour *neigh)
1516{
1517 if (neigh->dead)
1518 return;
1519 neigh->updated = jiffies;
1520 if (!(neigh->nud_state & NUD_FAILED))
1521 return;
1522 neigh->nud_state = NUD_INCOMPLETE;
1523 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1524 neigh_add_timer(neigh,
1525 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1526 HZ/100));
1527}
1528EXPORT_SYMBOL(__neigh_set_probe_once);
1529
1530struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1531 u8 *lladdr, void *saddr,
1532 struct net_device *dev)
1533{
1534 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1535 lladdr || !dev->addr_len);
1536 if (neigh)
1537 neigh_update(neigh, lladdr, NUD_STALE,
1538 NEIGH_UPDATE_F_OVERRIDE, 0);
1539 return neigh;
1540}
1541EXPORT_SYMBOL(neigh_event_ns);
1542
1543/* Takes n->lock for writing itself; the caller must not hold it. */
1544static void neigh_hh_init(struct neighbour *n)
1545{
1546 struct net_device *dev = n->dev;
1547 __be16 prot = n->tbl->protocol;
1548 struct hh_cache *hh = &n->hh;
1549
1550 write_lock_bh(&n->lock);
1551
1552 /* Only one thread can come in here and initialize the
1553 * hh_cache entry.
1554 */
1555 if (!hh->hh_len)
1556 dev->header_ops->cache(n, hh, prot);
1557
1558 write_unlock_bh(&n->lock);
1559}
1560
1561/* Slow and careful. */
1562
1563int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1564{
1565 int rc = 0;
1566
1567 if (!neigh_event_send(neigh, skb)) {
1568 int err;
1569 struct net_device *dev = neigh->dev;
1570 unsigned int seq;
1571
1572 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1573 neigh_hh_init(neigh);
1574
1575 do {
1576 __skb_pull(skb, skb_network_offset(skb));
1577 seq = read_seqbegin(&neigh->ha_lock);
1578 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1579 neigh->ha, NULL, skb->len);
1580 } while (read_seqretry(&neigh->ha_lock, seq));
1581
1582 if (err >= 0)
1583 rc = dev_queue_xmit(skb);
1584 else
1585 goto out_kfree_skb;
1586 }
1587out:
1588 return rc;
1589out_kfree_skb:
1590 rc = -EINVAL;
1591 kfree_skb(skb);
1592 goto out;
1593}
1594EXPORT_SYMBOL(neigh_resolve_output);
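
/*
 * Note on the read_seqbegin()/read_seqretry() loop above: neigh->ha may
 * be rewritten concurrently by __neigh_update() under ha_lock, so the
 * hard header is rebuilt until a consistent snapshot of the address has
 * been copied into the skb.
 */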
1595
1596/* As fast as possible without hh cache */
1597
1598int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1599{
1600 struct net_device *dev = neigh->dev;
1601 unsigned int seq;
1602 int err;
1603
1604 do {
1605 __skb_pull(skb, skb_network_offset(skb));
1606 seq = read_seqbegin(&neigh->ha_lock);
1607 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1608 neigh->ha, NULL, skb->len);
1609 } while (read_seqretry(&neigh->ha_lock, seq));
1610
1611 if (err >= 0)
1612 err = dev_queue_xmit(skb);
1613 else {
1614 err = -EINVAL;
1615 kfree_skb(skb);
1616 }
1617 return err;
1618}
1619EXPORT_SYMBOL(neigh_connected_output);
1620
1621int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1622{
1623 return dev_queue_xmit(skb);
1624}
1625EXPORT_SYMBOL(neigh_direct_output);
1626
1627static void neigh_managed_work(struct work_struct *work)
1628{
1629 struct neigh_table *tbl = container_of(work, struct neigh_table,
1630 managed_work.work);
1631 struct neighbour *neigh;
1632
1633 write_lock_bh(&tbl->lock);
1634 list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1635 neigh_event_send_probe(neigh, NULL, false);
1636 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1637 NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1638 write_unlock_bh(&tbl->lock);
1639}
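
/*
 * Entries created with NTF_MANAGED (NEIGH_UPDATE_F_MANAGED) sit on
 * tbl->managed_list; the work item above re-probes each of them every
 * INTERVAL_PROBE_TIME_MS, so their cached lladdr is kept fresh without
 * user traffic having to trigger resolution.
 */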
1640
1641static void neigh_proxy_process(struct timer_list *t)
1642{
1643 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1644 long sched_next = 0;
1645 unsigned long now = jiffies;
1646 struct sk_buff *skb, *n;
1647
1648 spin_lock(&tbl->proxy_queue.lock);
1649
1650 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1651 long tdif = NEIGH_CB(skb)->sched_next - now;
1652
1653 if (tdif <= 0) {
1654 struct net_device *dev = skb->dev;
1655
1656 neigh_parms_qlen_dec(dev, tbl->family);
1657 __skb_unlink(skb, &tbl->proxy_queue);
1658
1659 if (tbl->proxy_redo && netif_running(dev)) {
1660 rcu_read_lock();
1661 tbl->proxy_redo(skb);
1662 rcu_read_unlock();
1663 } else {
1664 kfree_skb(skb);
1665 }
1666
1667 dev_put(dev);
1668 } else if (!sched_next || tdif < sched_next)
1669 sched_next = tdif;
1670 }
1671 del_timer(&tbl->proxy_timer);
1672 if (sched_next)
1673 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1674 spin_unlock(&tbl->proxy_queue.lock);
1675}
1676
1677void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1678 struct sk_buff *skb)
1679{
1680 unsigned long sched_next = jiffies +
1681 get_random_u32_below(NEIGH_VAR(p, PROXY_DELAY));
1682
1683 if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1684 kfree_skb(skb);
1685 return;
1686 }
1687
1688 NEIGH_CB(skb)->sched_next = sched_next;
1689 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1690
1691 spin_lock(&tbl->proxy_queue.lock);
1692 if (del_timer(&tbl->proxy_timer)) {
1693 if (time_before(tbl->proxy_timer.expires, sched_next))
1694 sched_next = tbl->proxy_timer.expires;
1695 }
1696 skb_dst_drop(skb);
1697 dev_hold(skb->dev);
1698 __skb_queue_tail(&tbl->proxy_queue, skb);
1699 p->qlen++;
1700 mod_timer(&tbl->proxy_timer, sched_next);
1701 spin_unlock(&tbl->proxy_queue.lock);
1702}
1703EXPORT_SYMBOL(pneigh_enqueue);
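
/*
 * Sketch of the intended use (caller context assumed, not defined here):
 * a proxy ARP/ND implementation defers its reply by a random delay of up
 * to PROXY_DELAY jiffies, e.g.
 *
 *	pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
 *
 * The queued skb is handed back to tbl->proxy_redo() by
 * neigh_proxy_process() once its scheduled time has passed.
 */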
1704
1705static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1706 struct net *net, int ifindex)
1707{
1708 struct neigh_parms *p;
1709
1710 list_for_each_entry(p, &tbl->parms_list, list) {
1711 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1712 (!p->dev && !ifindex && net_eq(net, &init_net)))
1713 return p;
1714 }
1715
1716 return NULL;
1717}
1718
1719struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1720 struct neigh_table *tbl)
1721{
1722 struct neigh_parms *p;
1723 struct net *net = dev_net(dev);
1724 const struct net_device_ops *ops = dev->netdev_ops;
1725
1726 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1727 if (p) {
1728 p->tbl = tbl;
1729 refcount_set(&p->refcnt, 1);
1730 p->reachable_time =
1731 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1732 p->qlen = 0;
1733 netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1734 p->dev = dev;
1735 write_pnet(&p->net, net);
1736 p->sysctl_table = NULL;
1737
1738 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1739 netdev_put(dev, &p->dev_tracker);
1740 kfree(p);
1741 return NULL;
1742 }
1743
1744 write_lock_bh(&tbl->lock);
1745 list_add(&p->list, &tbl->parms.list);
1746 write_unlock_bh(&tbl->lock);
1747
1748 neigh_parms_data_state_cleanall(p);
1749 }
1750 return p;
1751}
1752EXPORT_SYMBOL(neigh_parms_alloc);
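
/*
 * Typical (illustrative) use: an address family clones the table defaults
 * when it attaches to a device, e.g.
 *
 *	parms = neigh_parms_alloc(dev, &arp_tbl);
 *
 * and later drops them with neigh_parms_release(&arp_tbl, parms). The
 * per-device values may then diverge from tbl->parms (via sysctl or
 * RTM_SETNEIGHTBL) without affecting other devices.
 */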
1753
1754static void neigh_rcu_free_parms(struct rcu_head *head)
1755{
1756 struct neigh_parms *parms =
1757 container_of(head, struct neigh_parms, rcu_head);
1758
1759 neigh_parms_put(parms);
1760}
1761
1762void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1763{
1764 if (!parms || parms == &tbl->parms)
1765 return;
1766 write_lock_bh(&tbl->lock);
1767 list_del(&parms->list);
1768 parms->dead = 1;
1769 write_unlock_bh(&tbl->lock);
1770 netdev_put(parms->dev, &parms->dev_tracker);
1771 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1772}
1773EXPORT_SYMBOL(neigh_parms_release);
1774
1775static void neigh_parms_destroy(struct neigh_parms *parms)
1776{
1777 kfree(parms);
1778}
1779
1780static struct lock_class_key neigh_table_proxy_queue_class;
1781
1782static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1783
1784void neigh_table_init(int index, struct neigh_table *tbl)
1785{
1786 unsigned long now = jiffies;
1787 unsigned long phsize;
1788
1789 INIT_LIST_HEAD(&tbl->parms_list);
1790 INIT_LIST_HEAD(&tbl->gc_list);
1791 INIT_LIST_HEAD(&tbl->managed_list);
1792
1793 list_add(&tbl->parms.list, &tbl->parms_list);
1794 write_pnet(&tbl->parms.net, &init_net);
1795 refcount_set(&tbl->parms.refcnt, 1);
1796 tbl->parms.reachable_time =
1797 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1798 tbl->parms.qlen = 0;
1799
1800 tbl->stats = alloc_percpu(struct neigh_statistics);
1801 if (!tbl->stats)
1802 panic("cannot create neighbour cache statistics");
1803
1804#ifdef CONFIG_PROC_FS
1805 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1806 &neigh_stat_seq_ops, tbl))
1807 panic("cannot create neighbour proc dir entry");
1808#endif
1809
1810 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1811
1812 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1813 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1814
1815 if (!tbl->nht || !tbl->phash_buckets)
1816 panic("cannot allocate neighbour cache hashes");
1817
1818 if (!tbl->entry_size)
1819 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1820 tbl->key_len, NEIGH_PRIV_ALIGN);
1821 else
1822 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1823
1824 rwlock_init(&tbl->lock);
1825
1826 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1827 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1828 tbl->parms.reachable_time);
1829 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1830 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1831
1832 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1833 skb_queue_head_init_class(&tbl->proxy_queue,
1834 &neigh_table_proxy_queue_class);
1835
1836 tbl->last_flush = now;
1837 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1838
1839 neigh_tables[index] = tbl;
1840}
1841EXPORT_SYMBOL(neigh_table_init);
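
/*
 * Address families register their tables once at init time, e.g. (sketch):
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and undo it with neigh_table_clear() on unload. The index must be one
 * of the NEIGH_*_TABLE values understood by neigh_find_table() below.
 */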
1842
1843int neigh_table_clear(int index, struct neigh_table *tbl)
1844{
1845 neigh_tables[index] = NULL;
1846 /* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1847 cancel_delayed_work_sync(&tbl->managed_work);
1848 cancel_delayed_work_sync(&tbl->gc_work);
1849 del_timer_sync(&tbl->proxy_timer);
1850 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1851 neigh_ifdown(tbl, NULL);
1852 if (atomic_read(&tbl->entries))
1853 pr_crit("neighbour leakage\n");
1854
1855 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1856 neigh_hash_free_rcu);
1857 tbl->nht = NULL;
1858
1859 kfree(tbl->phash_buckets);
1860 tbl->phash_buckets = NULL;
1861
1862 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1863
1864 free_percpu(tbl->stats);
1865 tbl->stats = NULL;
1866
1867 return 0;
1868}
1869EXPORT_SYMBOL(neigh_table_clear);
1870
1871static struct neigh_table *neigh_find_table(int family)
1872{
1873 struct neigh_table *tbl = NULL;
1874
1875 switch (family) {
1876 case AF_INET:
1877 tbl = neigh_tables[NEIGH_ARP_TABLE];
1878 break;
1879 case AF_INET6:
1880 tbl = neigh_tables[NEIGH_ND_TABLE];
1881 break;
1882 }
1883
1884 return tbl;
1885}
1886
1887const struct nla_policy nda_policy[NDA_MAX+1] = {
1888 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1889 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1890 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1891 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1892 [NDA_PROBES] = { .type = NLA_U32 },
1893 [NDA_VLAN] = { .type = NLA_U16 },
1894 [NDA_PORT] = { .type = NLA_U16 },
1895 [NDA_VNI] = { .type = NLA_U32 },
1896 [NDA_IFINDEX] = { .type = NLA_U32 },
1897 [NDA_MASTER] = { .type = NLA_U32 },
1898 [NDA_PROTOCOL] = { .type = NLA_U8 },
1899 [NDA_NH_ID] = { .type = NLA_U32 },
1900 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1901 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
1902};
1903
1904static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1905 struct netlink_ext_ack *extack)
1906{
1907 struct net *net = sock_net(skb->sk);
1908 struct ndmsg *ndm;
1909 struct nlattr *dst_attr;
1910 struct neigh_table *tbl;
1911 struct neighbour *neigh;
1912 struct net_device *dev = NULL;
1913 int err = -EINVAL;
1914
1915 ASSERT_RTNL();
1916 if (nlmsg_len(nlh) < sizeof(*ndm))
1917 goto out;
1918
1919 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1920 if (!dst_attr) {
1921 NL_SET_ERR_MSG(extack, "Network address not specified");
1922 goto out;
1923 }
1924
1925 ndm = nlmsg_data(nlh);
1926 if (ndm->ndm_ifindex) {
1927 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1928 if (dev == NULL) {
1929 err = -ENODEV;
1930 goto out;
1931 }
1932 }
1933
1934 tbl = neigh_find_table(ndm->ndm_family);
1935 if (tbl == NULL)
1936 return -EAFNOSUPPORT;
1937
1938 if (nla_len(dst_attr) < (int)tbl->key_len) {
1939 NL_SET_ERR_MSG(extack, "Invalid network address");
1940 goto out;
1941 }
1942
1943 if (ndm->ndm_flags & NTF_PROXY) {
1944 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1945 goto out;
1946 }
1947
1948 if (dev == NULL)
1949 goto out;
1950
1951 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1952 if (neigh == NULL) {
1953 err = -ENOENT;
1954 goto out;
1955 }
1956
1957 err = __neigh_update(neigh, NULL, NUD_FAILED,
1958 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1959 NETLINK_CB(skb).portid, extack);
1960 write_lock_bh(&tbl->lock);
1961 neigh_release(neigh);
1962 neigh_remove_one(neigh, tbl);
1963 write_unlock_bh(&tbl->lock);
1964
1965out:
1966 return err;
1967}
1968
1969static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1970 struct netlink_ext_ack *extack)
1971{
1972 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1973 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1974 struct net *net = sock_net(skb->sk);
1975 struct ndmsg *ndm;
1976 struct nlattr *tb[NDA_MAX+1];
1977 struct neigh_table *tbl;
1978 struct net_device *dev = NULL;
1979 struct neighbour *neigh;
1980 void *dst, *lladdr;
1981 u8 protocol = 0;
1982 u32 ndm_flags;
1983 int err;
1984
1985 ASSERT_RTNL();
1986 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1987 nda_policy, extack);
1988 if (err < 0)
1989 goto out;
1990
1991 err = -EINVAL;
1992 if (!tb[NDA_DST]) {
1993 NL_SET_ERR_MSG(extack, "Network address not specified");
1994 goto out;
1995 }
1996
1997 ndm = nlmsg_data(nlh);
1998 ndm_flags = ndm->ndm_flags;
1999 if (tb[NDA_FLAGS_EXT]) {
2000 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
2001
2002 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
2003 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
2004 hweight32(NTF_EXT_MASK)));
2005 ndm_flags |= (ext << NTF_EXT_SHIFT);
2006 }
2007 if (ndm->ndm_ifindex) {
2008 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2009 if (dev == NULL) {
2010 err = -ENODEV;
2011 goto out;
2012 }
2013
2014 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
2015 NL_SET_ERR_MSG(extack, "Invalid link address");
2016 goto out;
2017 }
2018 }
2019
2020 tbl = neigh_find_table(ndm->ndm_family);
2021 if (tbl == NULL)
2022 return -EAFNOSUPPORT;
2023
2024 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2025 NL_SET_ERR_MSG(extack, "Invalid network address");
2026 goto out;
2027 }
2028
2029 dst = nla_data(tb[NDA_DST]);
2030 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
2031
2032 if (tb[NDA_PROTOCOL])
2033 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
2034 if (ndm_flags & NTF_PROXY) {
2035 struct pneigh_entry *pn;
2036
2037 if (ndm_flags & NTF_MANAGED) {
2038 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
2039 goto out;
2040 }
2041
2042 err = -ENOBUFS;
2043 pn = pneigh_lookup(tbl, net, dst, dev, 1);
2044 if (pn) {
2045 pn->flags = ndm_flags;
2046 if (protocol)
2047 pn->protocol = protocol;
2048 err = 0;
2049 }
2050 goto out;
2051 }
2052
2053 if (!dev) {
2054 NL_SET_ERR_MSG(extack, "Device not specified");
2055 goto out;
2056 }
2057
2058 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2059 err = -EINVAL;
2060 goto out;
2061 }
2062
2063 neigh = neigh_lookup(tbl, dst, dev);
2064 if (neigh == NULL) {
2065 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
2066 bool exempt_from_gc = ndm_permanent ||
2067 ndm_flags & NTF_EXT_LEARNED;
2068
2069 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2070 err = -ENOENT;
2071 goto out;
2072 }
2073 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2074 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2075 err = -EINVAL;
2076 goto out;
2077 }
2078
2079 neigh = ___neigh_create(tbl, dst, dev,
2080 ndm_flags &
2081 (NTF_EXT_LEARNED | NTF_MANAGED),
2082 exempt_from_gc, true);
2083 if (IS_ERR(neigh)) {
2084 err = PTR_ERR(neigh);
2085 goto out;
2086 }
2087 } else {
2088 if (nlh->nlmsg_flags & NLM_F_EXCL) {
2089 err = -EEXIST;
2090 neigh_release(neigh);
2091 goto out;
2092 }
2093
2094 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2095 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2096 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2097 }
2098
2099 if (protocol)
2100 neigh->protocol = protocol;
2101 if (ndm_flags & NTF_EXT_LEARNED)
2102 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2103 if (ndm_flags & NTF_ROUTER)
2104 flags |= NEIGH_UPDATE_F_ISROUTER;
2105 if (ndm_flags & NTF_MANAGED)
2106 flags |= NEIGH_UPDATE_F_MANAGED;
2107 if (ndm_flags & NTF_USE)
2108 flags |= NEIGH_UPDATE_F_USE;
2109
2110 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2111 NETLINK_CB(skb).portid, extack);
2112 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2113 neigh_event_send(neigh, NULL);
2114 err = 0;
2115 }
2116 neigh_release(neigh);
2117out:
2118 return err;
2119}
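
/*
 * For orientation (illustrative; iproute2 command assumed): the
 * RTM_NEWNEIGH requests handled above are what e.g.
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0
 *
 * ends up sending; NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL select the
 * create/replace/exclusive-add behaviour implemented above.
 */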
2120
2121static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2122{
2123 struct nlattr *nest;
2124
2125 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2126 if (nest == NULL)
2127 return -ENOBUFS;
2128
2129 if ((parms->dev &&
2130 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2131 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2132 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2133 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2134 /* approximate value for deprecated QUEUE_LEN (in packets) */
2135 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2136 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2137 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2138 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2139 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2140 NEIGH_VAR(parms, UCAST_PROBES)) ||
2141 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2142 NEIGH_VAR(parms, MCAST_PROBES)) ||
2143 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2144 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2145 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2146 NDTPA_PAD) ||
2147 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2148 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2149 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2150 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2151 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2152 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2153 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2154 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2155 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2156 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2157 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2158 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2159 nla_put_msecs(skb, NDTPA_LOCKTIME,
2160 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2161 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2162 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2163 goto nla_put_failure;
2164 return nla_nest_end(skb, nest);
2165
2166nla_put_failure:
2167 nla_nest_cancel(skb, nest);
2168 return -EMSGSIZE;
2169}
2170
2171static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2172 u32 pid, u32 seq, int type, int flags)
2173{
2174 struct nlmsghdr *nlh;
2175 struct ndtmsg *ndtmsg;
2176
2177 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2178 if (nlh == NULL)
2179 return -EMSGSIZE;
2180
2181 ndtmsg = nlmsg_data(nlh);
2182
2183 read_lock_bh(&tbl->lock);
2184 ndtmsg->ndtm_family = tbl->family;
2185 ndtmsg->ndtm_pad1 = 0;
2186 ndtmsg->ndtm_pad2 = 0;
2187
2188 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2189 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2190 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2191 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2192 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2193 goto nla_put_failure;
2194 {
2195 unsigned long now = jiffies;
2196 long flush_delta = now - tbl->last_flush;
2197 long rand_delta = now - tbl->last_rand;
2198 struct neigh_hash_table *nht;
2199 struct ndt_config ndc = {
2200 .ndtc_key_len = tbl->key_len,
2201 .ndtc_entry_size = tbl->entry_size,
2202 .ndtc_entries = atomic_read(&tbl->entries),
2203 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2204 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2205 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2206 };
2207
2208 rcu_read_lock_bh();
2209 nht = rcu_dereference_bh(tbl->nht);
2210 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2211 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2212 rcu_read_unlock_bh();
2213
2214 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2215 goto nla_put_failure;
2216 }
2217
2218 {
2219 int cpu;
2220 struct ndt_stats ndst;
2221
2222 memset(&ndst, 0, sizeof(ndst));
2223
2224 for_each_possible_cpu(cpu) {
2225 struct neigh_statistics *st;
2226
2227 st = per_cpu_ptr(tbl->stats, cpu);
2228 ndst.ndts_allocs += st->allocs;
2229 ndst.ndts_destroys += st->destroys;
2230 ndst.ndts_hash_grows += st->hash_grows;
2231 ndst.ndts_res_failed += st->res_failed;
2232 ndst.ndts_lookups += st->lookups;
2233 ndst.ndts_hits += st->hits;
2234 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2235 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2236 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2237 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
2238 ndst.ndts_table_fulls += st->table_fulls;
2239 }
2240
2241 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2242 NDTA_PAD))
2243 goto nla_put_failure;
2244 }
2245
2246 BUG_ON(tbl->parms.dev);
2247 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2248 goto nla_put_failure;
2249
2250 read_unlock_bh(&tbl->lock);
2251 nlmsg_end(skb, nlh);
2252 return 0;
2253
2254nla_put_failure:
2255 read_unlock_bh(&tbl->lock);
2256 nlmsg_cancel(skb, nlh);
2257 return -EMSGSIZE;
2258}
2259
2260static int neightbl_fill_param_info(struct sk_buff *skb,
2261 struct neigh_table *tbl,
2262 struct neigh_parms *parms,
2263 u32 pid, u32 seq, int type,
2264 unsigned int flags)
2265{
2266 struct ndtmsg *ndtmsg;
2267 struct nlmsghdr *nlh;
2268
2269 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2270 if (nlh == NULL)
2271 return -EMSGSIZE;
2272
2273 ndtmsg = nlmsg_data(nlh);
2274
2275 read_lock_bh(&tbl->lock);
2276 ndtmsg->ndtm_family = tbl->family;
2277 ndtmsg->ndtm_pad1 = 0;
2278 ndtmsg->ndtm_pad2 = 0;
2279
2280 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2281 neightbl_fill_parms(skb, parms) < 0)
2282 goto errout;
2283
2284 read_unlock_bh(&tbl->lock);
2285 nlmsg_end(skb, nlh);
2286 return 0;
2287errout:
2288 read_unlock_bh(&tbl->lock);
2289 nlmsg_cancel(skb, nlh);
2290 return -EMSGSIZE;
2291}
2292
2293static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2294 [NDTA_NAME] = { .type = NLA_STRING },
2295 [NDTA_THRESH1] = { .type = NLA_U32 },
2296 [NDTA_THRESH2] = { .type = NLA_U32 },
2297 [NDTA_THRESH3] = { .type = NLA_U32 },
2298 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2299 [NDTA_PARMS] = { .type = NLA_NESTED },
2300};
2301
2302static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2303 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2304 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2305 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2306 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2307 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2308 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2309 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2310 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2311 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2312 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2313 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2314 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2315 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2316 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2317 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 },
2318};
2319
2320static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2321 struct netlink_ext_ack *extack)
2322{
2323 struct net *net = sock_net(skb->sk);
2324 struct neigh_table *tbl;
2325 struct ndtmsg *ndtmsg;
2326 struct nlattr *tb[NDTA_MAX+1];
2327 bool found = false;
2328 int err, tidx;
2329
2330 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2331 nl_neightbl_policy, extack);
2332 if (err < 0)
2333 goto errout;
2334
2335 if (tb[NDTA_NAME] == NULL) {
2336 err = -EINVAL;
2337 goto errout;
2338 }
2339
2340 ndtmsg = nlmsg_data(nlh);
2341
2342 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2343 tbl = neigh_tables[tidx];
2344 if (!tbl)
2345 continue;
2346 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2347 continue;
2348 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2349 found = true;
2350 break;
2351 }
2352 }
2353
2354 if (!found)
2355 return -ENOENT;
2356
2357 /*
2358 * We acquire tbl->lock to be nice to the periodic timers and
2359 * make sure they always see a consistent set of values.
2360 */
2361 write_lock_bh(&tbl->lock);
2362
2363 if (tb[NDTA_PARMS]) {
2364 struct nlattr *tbp[NDTPA_MAX+1];
2365 struct neigh_parms *p;
2366 int i, ifindex = 0;
2367
2368 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2369 tb[NDTA_PARMS],
2370 nl_ntbl_parm_policy, extack);
2371 if (err < 0)
2372 goto errout_tbl_lock;
2373
2374 if (tbp[NDTPA_IFINDEX])
2375 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2376
2377 p = lookup_neigh_parms(tbl, net, ifindex);
2378 if (p == NULL) {
2379 err = -ENOENT;
2380 goto errout_tbl_lock;
2381 }
2382
2383 for (i = 1; i <= NDTPA_MAX; i++) {
2384 if (tbp[i] == NULL)
2385 continue;
2386
2387 switch (i) {
2388 case NDTPA_QUEUE_LEN:
2389 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2390 nla_get_u32(tbp[i]) *
2391 SKB_TRUESIZE(ETH_FRAME_LEN));
2392 break;
2393 case NDTPA_QUEUE_LENBYTES:
2394 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2395 nla_get_u32(tbp[i]));
2396 break;
2397 case NDTPA_PROXY_QLEN:
2398 NEIGH_VAR_SET(p, PROXY_QLEN,
2399 nla_get_u32(tbp[i]));
2400 break;
2401 case NDTPA_APP_PROBES:
2402 NEIGH_VAR_SET(p, APP_PROBES,
2403 nla_get_u32(tbp[i]));
2404 break;
2405 case NDTPA_UCAST_PROBES:
2406 NEIGH_VAR_SET(p, UCAST_PROBES,
2407 nla_get_u32(tbp[i]));
2408 break;
2409 case NDTPA_MCAST_PROBES:
2410 NEIGH_VAR_SET(p, MCAST_PROBES,
2411 nla_get_u32(tbp[i]));
2412 break;
2413 case NDTPA_MCAST_REPROBES:
2414 NEIGH_VAR_SET(p, MCAST_REPROBES,
2415 nla_get_u32(tbp[i]));
2416 break;
2417 case NDTPA_BASE_REACHABLE_TIME:
2418 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2419 nla_get_msecs(tbp[i]));
2420 /* update reachable_time as well; otherwise the change will
2421 * only take effect the next time neigh_periodic_work
2422 * decides to recompute it (which can be many minutes away)
2423 */
2424 p->reachable_time =
2425 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2426 break;
2427 case NDTPA_GC_STALETIME:
2428 NEIGH_VAR_SET(p, GC_STALETIME,
2429 nla_get_msecs(tbp[i]));
2430 break;
2431 case NDTPA_DELAY_PROBE_TIME:
2432 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2433 nla_get_msecs(tbp[i]));
2434 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2435 break;
2436 case NDTPA_INTERVAL_PROBE_TIME_MS:
2437 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2438 nla_get_msecs(tbp[i]));
2439 break;
2440 case NDTPA_RETRANS_TIME:
2441 NEIGH_VAR_SET(p, RETRANS_TIME,
2442 nla_get_msecs(tbp[i]));
2443 break;
2444 case NDTPA_ANYCAST_DELAY:
2445 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2446 nla_get_msecs(tbp[i]));
2447 break;
2448 case NDTPA_PROXY_DELAY:
2449 NEIGH_VAR_SET(p, PROXY_DELAY,
2450 nla_get_msecs(tbp[i]));
2451 break;
2452 case NDTPA_LOCKTIME:
2453 NEIGH_VAR_SET(p, LOCKTIME,
2454 nla_get_msecs(tbp[i]));
2455 break;
2456 }
2457 }
2458 }
2459
2460 err = -ENOENT;
2461 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2462 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2463 !net_eq(net, &init_net))
2464 goto errout_tbl_lock;
2465
2466 if (tb[NDTA_THRESH1])
2467 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2468
2469 if (tb[NDTA_THRESH2])
2470 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2471
2472 if (tb[NDTA_THRESH3])
2473 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2474
2475 if (tb[NDTA_GC_INTERVAL])
2476 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2477
2478 err = 0;
2479
2480errout_tbl_lock:
2481 write_unlock_bh(&tbl->lock);
2482errout:
2483 return err;
2484}
2485
2486static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2487 struct netlink_ext_ack *extack)
2488{
2489 struct ndtmsg *ndtm;
2490
2491 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2492 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2493 return -EINVAL;
2494 }
2495
2496 ndtm = nlmsg_data(nlh);
2497 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2498 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2499 return -EINVAL;
2500 }
2501
2502 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2503 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2504 return -EINVAL;
2505 }
2506
2507 return 0;
2508}
2509
2510static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2511{
2512 const struct nlmsghdr *nlh = cb->nlh;
2513 struct net *net = sock_net(skb->sk);
2514 int family, tidx, nidx = 0;
2515 int tbl_skip = cb->args[0];
2516 int neigh_skip = cb->args[1];
2517 struct neigh_table *tbl;
2518
2519 if (cb->strict_check) {
2520 int err = neightbl_valid_dump_info(nlh, cb->extack);
2521
2522 if (err < 0)
2523 return err;
2524 }
2525
2526 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2527
2528 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2529 struct neigh_parms *p;
2530
2531 tbl = neigh_tables[tidx];
2532 if (!tbl)
2533 continue;
2534
2535 if (tidx < tbl_skip || (family && tbl->family != family))
2536 continue;
2537
2538 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2539 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2540 NLM_F_MULTI) < 0)
2541 break;
2542
2543 nidx = 0;
2544 p = list_next_entry(&tbl->parms, list);
2545 list_for_each_entry_from(p, &tbl->parms_list, list) {
2546 if (!net_eq(neigh_parms_net(p), net))
2547 continue;
2548
2549 if (nidx < neigh_skip)
2550 goto next;
2551
2552 if (neightbl_fill_param_info(skb, tbl, p,
2553 NETLINK_CB(cb->skb).portid,
2554 nlh->nlmsg_seq,
2555 RTM_NEWNEIGHTBL,
2556 NLM_F_MULTI) < 0)
2557 goto out;
2558 next:
2559 nidx++;
2560 }
2561
2562 neigh_skip = 0;
2563 }
2564out:
2565 cb->args[0] = tidx;
2566 cb->args[1] = nidx;
2567
2568 return skb->len;
2569}
2570
2571static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2572 u32 pid, u32 seq, int type, unsigned int flags)
2573{
2574 u32 neigh_flags, neigh_flags_ext;
2575 unsigned long now = jiffies;
2576 struct nda_cacheinfo ci;
2577 struct nlmsghdr *nlh;
2578 struct ndmsg *ndm;
2579
2580 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2581 if (nlh == NULL)
2582 return -EMSGSIZE;
2583
2584 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2585 neigh_flags = neigh->flags & NTF_OLD_MASK;
2586
2587 ndm = nlmsg_data(nlh);
2588 ndm->ndm_family = neigh->ops->family;
2589 ndm->ndm_pad1 = 0;
2590 ndm->ndm_pad2 = 0;
2591 ndm->ndm_flags = neigh_flags;
2592 ndm->ndm_type = neigh->type;
2593 ndm->ndm_ifindex = neigh->dev->ifindex;
2594
2595 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2596 goto nla_put_failure;
2597
2598 read_lock_bh(&neigh->lock);
2599 ndm->ndm_state = neigh->nud_state;
2600 if (neigh->nud_state & NUD_VALID) {
2601 char haddr[MAX_ADDR_LEN];
2602
2603 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2604 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2605 read_unlock_bh(&neigh->lock);
2606 goto nla_put_failure;
2607 }
2608 }
2609
2610 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2611 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2612 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2613 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2614 read_unlock_bh(&neigh->lock);
2615
2616 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2617 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2618 goto nla_put_failure;
2619
2620 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2621 goto nla_put_failure;
2622 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2623 goto nla_put_failure;
2624
2625 nlmsg_end(skb, nlh);
2626 return 0;
2627
2628nla_put_failure:
2629 nlmsg_cancel(skb, nlh);
2630 return -EMSGSIZE;
2631}
2632
2633static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2634 u32 pid, u32 seq, int type, unsigned int flags,
2635 struct neigh_table *tbl)
2636{
2637 u32 neigh_flags, neigh_flags_ext;
2638 struct nlmsghdr *nlh;
2639 struct ndmsg *ndm;
2640
2641 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2642 if (nlh == NULL)
2643 return -EMSGSIZE;
2644
2645 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2646 neigh_flags = pn->flags & NTF_OLD_MASK;
2647
2648 ndm = nlmsg_data(nlh);
2649 ndm->ndm_family = tbl->family;
2650 ndm->ndm_pad1 = 0;
2651 ndm->ndm_pad2 = 0;
2652 ndm->ndm_flags = neigh_flags | NTF_PROXY;
2653 ndm->ndm_type = RTN_UNICAST;
2654 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2655 ndm->ndm_state = NUD_NONE;
2656
2657 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2658 goto nla_put_failure;
2659
2660 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2661 goto nla_put_failure;
2662 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2663 goto nla_put_failure;
2664
2665 nlmsg_end(skb, nlh);
2666 return 0;
2667
2668nla_put_failure:
2669 nlmsg_cancel(skb, nlh);
2670 return -EMSGSIZE;
2671}
2672
2673static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2674{
2675 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2676 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2677}
2678
2679static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2680{
2681 struct net_device *master;
2682
2683 if (!master_idx)
2684 return false;
2685
2686 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2687
2688 /* 0 is already used to denote that NDA_MASTER wasn't passed, so we
2689 * need another invalid ifindex value to denote "no master".
2690 */
2691 if (master_idx == -1)
2692 return !!master;
2693
2694 if (!master || master->ifindex != master_idx)
2695 return true;
2696
2697 return false;
2698}
2699
2700static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2701{
2702 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2703 return true;
2704
2705 return false;
2706}
2707
2708struct neigh_dump_filter {
2709 int master_idx;
2710 int dev_idx;
2711};
2712
2713static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2714 struct netlink_callback *cb,
2715 struct neigh_dump_filter *filter)
2716{
2717 struct net *net = sock_net(skb->sk);
2718 struct neighbour *n;
2719 int rc, h, s_h = cb->args[1];
2720 int idx, s_idx = idx = cb->args[2];
2721 struct neigh_hash_table *nht;
2722 unsigned int flags = NLM_F_MULTI;
2723
2724 if (filter->dev_idx || filter->master_idx)
2725 flags |= NLM_F_DUMP_FILTERED;
2726
2727 rcu_read_lock_bh();
2728 nht = rcu_dereference_bh(tbl->nht);
2729
2730 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2731 if (h > s_h)
2732 s_idx = 0;
2733 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2734 n != NULL;
2735 n = rcu_dereference_bh(n->next)) {
2736 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2737 goto next;
2738 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2739 neigh_master_filtered(n->dev, filter->master_idx))
2740 goto next;
2741 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2742 cb->nlh->nlmsg_seq,
2743 RTM_NEWNEIGH,
2744 flags) < 0) {
2745 rc = -1;
2746 goto out;
2747 }
2748next:
2749 idx++;
2750 }
2751 }
2752 rc = skb->len;
2753out:
2754 rcu_read_unlock_bh();
2755 cb->args[1] = h;
2756 cb->args[2] = idx;
2757 return rc;
2758}
2759
2760static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2761 struct netlink_callback *cb,
2762 struct neigh_dump_filter *filter)
2763{
2764 struct pneigh_entry *n;
2765 struct net *net = sock_net(skb->sk);
2766 int rc, h, s_h = cb->args[3];
2767 int idx, s_idx = idx = cb->args[4];
2768 unsigned int flags = NLM_F_MULTI;
2769
2770 if (filter->dev_idx || filter->master_idx)
2771 flags |= NLM_F_DUMP_FILTERED;
2772
2773 read_lock_bh(&tbl->lock);
2774
2775 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2776 if (h > s_h)
2777 s_idx = 0;
2778 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2779 if (idx < s_idx || pneigh_net(n) != net)
2780 goto next;
2781 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2782 neigh_master_filtered(n->dev, filter->master_idx))
2783 goto next;
2784 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2785 cb->nlh->nlmsg_seq,
2786 RTM_NEWNEIGH, flags, tbl) < 0) {
2787 read_unlock_bh(&tbl->lock);
2788 rc = -1;
2789 goto out;
2790 }
2791 next:
2792 idx++;
2793 }
2794 }
2795
2796 read_unlock_bh(&tbl->lock);
2797 rc = skb->len;
2798out:
2799 cb->args[3] = h;
2800 cb->args[4] = idx;
2801 return rc;
2802
2803}
2804
2805static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2806 bool strict_check,
2807 struct neigh_dump_filter *filter,
2808 struct netlink_ext_ack *extack)
2809{
2810 struct nlattr *tb[NDA_MAX + 1];
2811 int err, i;
2812
2813 if (strict_check) {
2814 struct ndmsg *ndm;
2815
2816 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2817 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2818 return -EINVAL;
2819 }
2820
2821 ndm = nlmsg_data(nlh);
2822 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2823 ndm->ndm_state || ndm->ndm_type) {
2824 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2825 return -EINVAL;
2826 }
2827
2828 if (ndm->ndm_flags & ~NTF_PROXY) {
2829 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2830 return -EINVAL;
2831 }
2832
2833 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2834 tb, NDA_MAX, nda_policy,
2835 extack);
2836 } else {
2837 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2838 NDA_MAX, nda_policy, extack);
2839 }
2840 if (err < 0)
2841 return err;
2842
2843 for (i = 0; i <= NDA_MAX; ++i) {
2844 if (!tb[i])
2845 continue;
2846
2847 /* all new attributes should require strict_check */
2848 switch (i) {
2849 case NDA_IFINDEX:
2850 filter->dev_idx = nla_get_u32(tb[i]);
2851 break;
2852 case NDA_MASTER:
2853 filter->master_idx = nla_get_u32(tb[i]);
2854 break;
2855 default:
2856 if (strict_check) {
2857 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2858 return -EINVAL;
2859 }
2860 }
2861 }
2862
2863 return 0;
2864}
2865
2866static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2867{
2868 const struct nlmsghdr *nlh = cb->nlh;
2869 struct neigh_dump_filter filter = {};
2870 struct neigh_table *tbl;
2871 int t, family, s_t;
2872 int proxy = 0;
2873 int err;
2874
2875 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2876
2877 /* check that a full ndmsg structure is present; the family member
2878 * is the same in both structures
2879 */
2880 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2881 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2882 proxy = 1;
2883
2884 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2885 if (err < 0 && cb->strict_check)
2886 return err;
2887
2888 s_t = cb->args[0];
2889
2890 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2891 tbl = neigh_tables[t];
2892
2893 if (!tbl)
2894 continue;
2895 if (t < s_t || (family && tbl->family != family))
2896 continue;
2897 if (t > s_t)
2898 memset(&cb->args[1], 0, sizeof(cb->args) -
2899 sizeof(cb->args[0]));
2900 if (proxy)
2901 err = pneigh_dump_table(tbl, skb, cb, &filter);
2902 else
2903 err = neigh_dump_table(tbl, skb, cb, &filter);
2904 if (err < 0)
2905 break;
2906 }
2907
2908 cb->args[0] = t;
2909 return skb->len;
2910}
2911
2912static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2913 struct neigh_table **tbl,
2914 void **dst, int *dev_idx, u8 *ndm_flags,
2915 struct netlink_ext_ack *extack)
2916{
2917 struct nlattr *tb[NDA_MAX + 1];
2918 struct ndmsg *ndm;
2919 int err, i;
2920
2921 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2922 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2923 return -EINVAL;
2924 }
2925
2926 ndm = nlmsg_data(nlh);
2927 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2928 ndm->ndm_type) {
2929 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2930 return -EINVAL;
2931 }
2932
2933 if (ndm->ndm_flags & ~NTF_PROXY) {
2934 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2935 return -EINVAL;
2936 }
2937
2938 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2939 NDA_MAX, nda_policy, extack);
2940 if (err < 0)
2941 return err;
2942
2943 *ndm_flags = ndm->ndm_flags;
2944 *dev_idx = ndm->ndm_ifindex;
2945 *tbl = neigh_find_table(ndm->ndm_family);
2946 if (*tbl == NULL) {
2947 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2948 return -EAFNOSUPPORT;
2949 }
2950
2951 for (i = 0; i <= NDA_MAX; ++i) {
2952 if (!tb[i])
2953 continue;
2954
2955 switch (i) {
2956 case NDA_DST:
2957 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2958 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2959 return -EINVAL;
2960 }
2961 *dst = nla_data(tb[i]);
2962 break;
2963 default:
2964 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2965 return -EINVAL;
2966 }
2967 }
2968
2969 return 0;
2970}
2971
2972static inline size_t neigh_nlmsg_size(void)
2973{
2974 return NLMSG_ALIGN(sizeof(struct ndmsg))
2975 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2976 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2977 + nla_total_size(sizeof(struct nda_cacheinfo))
2978 + nla_total_size(4) /* NDA_PROBES */
2979 + nla_total_size(4) /* NDA_FLAGS_EXT */
2980 + nla_total_size(1); /* NDA_PROTOCOL */
2981}
2982
2983static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2984 u32 pid, u32 seq)
2985{
2986 struct sk_buff *skb;
2987 int err = 0;
2988
2989 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2990 if (!skb)
2991 return -ENOBUFS;
2992
2993 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2994 if (err) {
2995 kfree_skb(skb);
2996 goto errout;
2997 }
2998
2999 err = rtnl_unicast(skb, net, pid);
3000errout:
3001 return err;
3002}
3003
3004static inline size_t pneigh_nlmsg_size(void)
3005{
3006 return NLMSG_ALIGN(sizeof(struct ndmsg))
3007 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
3008 + nla_total_size(4) /* NDA_FLAGS_EXT */
3009 + nla_total_size(1); /* NDA_PROTOCOL */
3010}
3011
3012static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
3013 u32 pid, u32 seq, struct neigh_table *tbl)
3014{
3015 struct sk_buff *skb;
3016 int err = 0;
3017
3018 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
3019 if (!skb)
3020 return -ENOBUFS;
3021
3022 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3023 if (err) {
3024 kfree_skb(skb);
3025 goto errout;
3026 }
3027
3028 err = rtnl_unicast(skb, net, pid);
3029errout:
3030 return err;
3031}
3032
3033static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3034 struct netlink_ext_ack *extack)
3035{
3036 struct net *net = sock_net(in_skb->sk);
3037 struct net_device *dev = NULL;
3038 struct neigh_table *tbl = NULL;
3039 struct neighbour *neigh;
3040 void *dst = NULL;
3041 u8 ndm_flags = 0;
3042 int dev_idx = 0;
3043 int err;
3044
3045 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3046 extack);
3047 if (err < 0)
3048 return err;
3049
3050 if (dev_idx) {
3051 dev = __dev_get_by_index(net, dev_idx);
3052 if (!dev) {
3053 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3054 return -ENODEV;
3055 }
3056 }
3057
3058 if (!dst) {
3059 NL_SET_ERR_MSG(extack, "Network address not specified");
3060 return -EINVAL;
3061 }
3062
3063 if (ndm_flags & NTF_PROXY) {
3064 struct pneigh_entry *pn;
3065
3066 pn = pneigh_lookup(tbl, net, dst, dev, 0);
3067 if (!pn) {
3068 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3069 return -ENOENT;
3070 }
3071 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3072 nlh->nlmsg_seq, tbl);
3073 }
3074
3075 if (!dev) {
3076 NL_SET_ERR_MSG(extack, "No device specified");
3077 return -EINVAL;
3078 }
3079
3080 neigh = neigh_lookup(tbl, dst, dev);
3081 if (!neigh) {
3082 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3083 return -ENOENT;
3084 }
3085
3086 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3087 nlh->nlmsg_seq);
3088
3089 neigh_release(neigh);
3090
3091 return err;
3092}
3093
3094void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3095{
3096 int chain;
3097 struct neigh_hash_table *nht;
3098
3099 rcu_read_lock_bh();
3100 nht = rcu_dereference_bh(tbl->nht);
3101
3102 read_lock(&tbl->lock); /* avoid resizes */
3103 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3104 struct neighbour *n;
3105
3106 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
3107 n != NULL;
3108 n = rcu_dereference_bh(n->next))
3109 cb(n, cookie);
3110 }
3111 read_unlock(&tbl->lock);
3112 rcu_read_unlock_bh();
3113}
3114EXPORT_SYMBOL(neigh_for_each);
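
/*
 * Illustrative callback (hypothetical helper, not defined in this file):
 *
 *	static void count_one(struct neighbour *n, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int cnt = 0;
 *	neigh_for_each(&arp_tbl, count_one, &cnt);
 *
 * The callback runs under tbl->lock (read) and rcu_read_lock_bh(), so it
 * must not sleep and must not add or remove entries.
 */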
3115
3116/* The tbl->lock must be held as a writer and BH disabled. */
3117void __neigh_for_each_release(struct neigh_table *tbl,
3118 int (*cb)(struct neighbour *))
3119{
3120 int chain;
3121 struct neigh_hash_table *nht;
3122
3123 nht = rcu_dereference_protected(tbl->nht,
3124 lockdep_is_held(&tbl->lock));
3125 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3126 struct neighbour *n;
3127 struct neighbour __rcu **np;
3128
3129 np = &nht->hash_buckets[chain];
3130 while ((n = rcu_dereference_protected(*np,
3131 lockdep_is_held(&tbl->lock))) != NULL) {
3132 int release;
3133
3134 write_lock(&n->lock);
3135 release = cb(n);
3136 if (release) {
3137 rcu_assign_pointer(*np,
3138 rcu_dereference_protected(n->next,
3139 lockdep_is_held(&tbl->lock)));
3140 neigh_mark_dead(n);
3141 } else
3142 np = &n->next;
3143 write_unlock(&n->lock);
3144 if (release)
3145 neigh_cleanup_and_release(n);
3146 }
3147 }
3148}
3149EXPORT_SYMBOL(__neigh_for_each_release);
3150
3151int neigh_xmit(int index, struct net_device *dev,
3152 const void *addr, struct sk_buff *skb)
3153{
3154 int err = -EAFNOSUPPORT;
3155 if (likely(index < NEIGH_NR_TABLES)) {
3156 struct neigh_table *tbl;
3157 struct neighbour *neigh;
3158
3159 tbl = neigh_tables[index];
3160 if (!tbl)
3161 goto out;
3162 rcu_read_lock_bh();
3163 if (index == NEIGH_ARP_TABLE) {
3164 u32 key = *((u32 *)addr);
3165
3166 neigh = __ipv4_neigh_lookup_noref(dev, key);
3167 } else {
3168 neigh = __neigh_lookup_noref(tbl, addr, dev);
3169 }
3170 if (!neigh)
3171 neigh = __neigh_create(tbl, addr, dev, false);
3172 err = PTR_ERR(neigh);
3173 if (IS_ERR(neigh)) {
3174 rcu_read_unlock_bh();
3175 goto out_kfree_skb;
3176 }
3177 err = neigh->output(neigh, skb);
3178 rcu_read_unlock_bh();
3179 }
3180 else if (index == NEIGH_LINK_TABLE) {
3181 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3182 addr, NULL, skb->len);
3183 if (err < 0)
3184 goto out_kfree_skb;
3185 err = dev_queue_xmit(skb);
3186 }
3187out:
3188 return err;
3189out_kfree_skb:
3190 kfree_skb(skb);
3191 goto out;
3192}
3193EXPORT_SYMBOL(neigh_xmit);
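
/*
 * neigh_xmit() lets callers that only hold an address and a table index
 * (no cached neighbour pointer) transmit through the cache. A sketch,
 * assuming an IPv4 next hop kept in a local __be32 nh4:
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &nh4, skb);
 *
 * NEIGH_LINK_TABLE bypasses the cache entirely and just builds the hard
 * header from the supplied address.
 */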
3194
3195#ifdef CONFIG_PROC_FS
3196
3197static struct neighbour *neigh_get_first(struct seq_file *seq)
3198{
3199 struct neigh_seq_state *state = seq->private;
3200 struct net *net = seq_file_net(seq);
3201 struct neigh_hash_table *nht = state->nht;
3202 struct neighbour *n = NULL;
3203 int bucket;
3204
3205 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3206 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3207 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3208
3209 while (n) {
3210 if (!net_eq(dev_net(n->dev), net))
3211 goto next;
3212 if (state->neigh_sub_iter) {
3213 loff_t fakep = 0;
3214 void *v;
3215
3216 v = state->neigh_sub_iter(state, n, &fakep);
3217 if (!v)
3218 goto next;
3219 }
3220 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3221 break;
3222 if (n->nud_state & ~NUD_NOARP)
3223 break;
3224next:
3225 n = rcu_dereference_bh(n->next);
3226 }
3227
3228 if (n)
3229 break;
3230 }
3231 state->bucket = bucket;
3232
3233 return n;
3234}
3235
3236static struct neighbour *neigh_get_next(struct seq_file *seq,
3237 struct neighbour *n,
3238 loff_t *pos)
3239{
3240 struct neigh_seq_state *state = seq->private;
3241 struct net *net = seq_file_net(seq);
3242 struct neigh_hash_table *nht = state->nht;
3243
3244 if (state->neigh_sub_iter) {
3245 void *v = state->neigh_sub_iter(state, n, pos);
3246 if (v)
3247 return n;
3248 }
3249 n = rcu_dereference_bh(n->next);
3250
3251 while (1) {
3252 while (n) {
3253 if (!net_eq(dev_net(n->dev), net))
3254 goto next;
3255 if (state->neigh_sub_iter) {
3256 void *v = state->neigh_sub_iter(state, n, pos);
3257 if (v)
3258 return n;
3259 goto next;
3260 }
3261 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3262 break;
3263
3264 if (n->nud_state & ~NUD_NOARP)
3265 break;
3266next:
3267 n = rcu_dereference_bh(n->next);
3268 }
3269
3270 if (n)
3271 break;
3272
3273 if (++state->bucket >= (1 << nht->hash_shift))
3274 break;
3275
3276 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3277 }
3278
3279 if (n && pos)
3280 --(*pos);
3281 return n;
3282}
3283
3284static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3285{
3286 struct neighbour *n = neigh_get_first(seq);
3287
3288 if (n) {
3289 --(*pos);
3290 while (*pos) {
3291 n = neigh_get_next(seq, n, pos);
3292 if (!n)
3293 break;
3294 }
3295 }
3296 return *pos ? NULL : n;
3297}
3298
3299static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3300{
3301 struct neigh_seq_state *state = seq->private;
3302 struct net *net = seq_file_net(seq);
3303 struct neigh_table *tbl = state->tbl;
3304 struct pneigh_entry *pn = NULL;
3305 int bucket;
3306
3307 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3308 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3309 pn = tbl->phash_buckets[bucket];
3310 while (pn && !net_eq(pneigh_net(pn), net))
3311 pn = pn->next;
3312 if (pn)
3313 break;
3314 }
3315 state->bucket = bucket;
3316
3317 return pn;
3318}
3319
3320static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3321 struct pneigh_entry *pn,
3322 loff_t *pos)
3323{
3324 struct neigh_seq_state *state = seq->private;
3325 struct net *net = seq_file_net(seq);
3326 struct neigh_table *tbl = state->tbl;
3327
3328 do {
3329 pn = pn->next;
3330 } while (pn && !net_eq(pneigh_net(pn), net));
3331
3332 while (!pn) {
3333 if (++state->bucket > PNEIGH_HASHMASK)
3334 break;
3335 pn = tbl->phash_buckets[state->bucket];
3336 while (pn && !net_eq(pneigh_net(pn), net))
3337 pn = pn->next;
3338 if (pn)
3339 break;
3340 }
3341
3342 if (pn && pos)
3343 --(*pos);
3344
3345 return pn;
3346}
3347
3348static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3349{
3350 struct pneigh_entry *pn = pneigh_get_first(seq);
3351
3352 if (pn) {
3353 --(*pos);
3354 while (*pos) {
3355 pn = pneigh_get_next(seq, pn, pos);
3356 if (!pn)
3357 break;
3358 }
3359 }
3360 return *pos ? NULL : pn;
3361}
3362
3363static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3364{
3365 struct neigh_seq_state *state = seq->private;
3366 void *rc;
3367 loff_t idxpos = *pos;
3368
3369 rc = neigh_get_idx(seq, &idxpos);
3370 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3371 rc = pneigh_get_idx(seq, &idxpos);
3372
3373 return rc;
3374}
3375
3376void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3377 __acquires(tbl->lock)
3378 __acquires(rcu_bh)
3379{
3380 struct neigh_seq_state *state = seq->private;
3381
3382 state->tbl = tbl;
3383 state->bucket = 0;
3384 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3385
3386 rcu_read_lock_bh();
3387 state->nht = rcu_dereference_bh(tbl->nht);
3388 read_lock(&tbl->lock);
3389
3390 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3391}
3392EXPORT_SYMBOL(neigh_seq_start);
3393
3394void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3395{
3396 struct neigh_seq_state *state;
3397 void *rc;
3398
3399 if (v == SEQ_START_TOKEN) {
3400 rc = neigh_get_first(seq);
3401 goto out;
3402 }
3403
3404 state = seq->private;
3405 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3406 rc = neigh_get_next(seq, v, NULL);
3407 if (rc)
3408 goto out;
3409 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3410 rc = pneigh_get_first(seq);
3411 } else {
3412 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3413 rc = pneigh_get_next(seq, v, NULL);
3414 }
3415out:
3416 ++(*pos);
3417 return rc;
3418}
3419EXPORT_SYMBOL(neigh_seq_next);
3420
3421void neigh_seq_stop(struct seq_file *seq, void *v)
3422 __releases(tbl->lock)
3423 __releases(rcu_bh)
3424{
3425 struct neigh_seq_state *state = seq->private;
3426 struct neigh_table *tbl = state->tbl;
3427
3428 read_unlock(&tbl->lock);
3429 rcu_read_unlock_bh();
3430}
3431EXPORT_SYMBOL(neigh_seq_stop);
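
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * exposes its neighbour table through /proc by plugging the three
 * exported helpers above into its own seq_operations and supplying just
 * the ->show callback; seq->private must be a struct neigh_seq_state
 * (e.g. sized via proc_create_net()).  The names my_tbl, my_seq_show
 * and my_seq_ops are hypothetical:
 *
 *	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &my_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start	= my_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= my_seq_show,
 *	};
 */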
3432
3433/* statistics via seq_file */
3434
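/*
 * The statistics iterator encodes its position as cpu + 1: position 0
 * produces the SEQ_START_TOKEN header row, and each later position
 * reports the counters of the next possible CPU.
 */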
3435static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3436{
3437 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3438 int cpu;
3439
3440 if (*pos == 0)
3441 return SEQ_START_TOKEN;
3442
3443 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3444 if (!cpu_possible(cpu))
3445 continue;
3446 *pos = cpu+1;
3447 return per_cpu_ptr(tbl->stats, cpu);
3448 }
3449 return NULL;
3450}
3451
3452static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3453{
3454 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3455 int cpu;
3456
3457 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3458 if (!cpu_possible(cpu))
3459 continue;
3460 *pos = cpu+1;
3461 return per_cpu_ptr(tbl->stats, cpu);
3462 }
3463 (*pos)++;
3464 return NULL;
3465}
3466
3467static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3468{
3469
3470}
3471
3472static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3473{
3474 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3475 struct neigh_statistics *st = v;
3476
3477 if (v == SEQ_START_TOKEN) {
3478 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3479 return 0;
3480 }
3481
3482 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3483 "%08lx %08lx %08lx "
3484 "%08lx %08lx %08lx\n",
3485 atomic_read(&tbl->entries),
3486
3487 st->allocs,
3488 st->destroys,
3489 st->hash_grows,
3490
3491 st->lookups,
3492 st->hits,
3493
3494 st->res_failed,
3495
3496 st->rcv_probes_mcast,
3497 st->rcv_probes_ucast,
3498
3499 st->periodic_gc_runs,
3500 st->forced_gc_runs,
3501 st->unres_discards,
3502 st->table_fulls
3503 );
3504
3505 return 0;
3506}
3507
3508static const struct seq_operations neigh_stat_seq_ops = {
3509 .start = neigh_stat_seq_start,
3510 .next = neigh_stat_seq_next,
3511 .stop = neigh_stat_seq_stop,
3512 .show = neigh_stat_seq_show,
3513};
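
/*
 * pde_data(file_inode(seq->file)) above returns the pointer that was
 * attached to the per-table entry under /proc/net/stat when the table
 * was initialised, so each neigh_table reads its own counters.
 */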
3514#endif /* CONFIG_PROC_FS */
3515
3516static void __neigh_notify(struct neighbour *n, int type, int flags,
3517 u32 pid)
3518{
3519 struct net *net = dev_net(n->dev);
3520 struct sk_buff *skb;
3521 int err = -ENOBUFS;
3522
3523 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3524 if (skb == NULL)
3525 goto errout;
3526
3527 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3528 if (err < 0) {
3529 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3530 WARN_ON(err == -EMSGSIZE);
3531 kfree_skb(skb);
3532 goto errout;
3533 }
3534 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3535 return;
3536errout:
3537 if (err < 0)
3538 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3539}
3540
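/*
 * Multicast an RTM_GETNEIGH request to RTNLGRP_NEIGH listeners so that
 * a user-space resolver can try to resolve the address (the
 * app_solicit/app_probes mechanism).
 */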
3541void neigh_app_ns(struct neighbour *n)
3542{
3543 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3544}
3545EXPORT_SYMBOL(neigh_app_ns);
3546
3547#ifdef CONFIG_SYSCTL
3548static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3549
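/*
 * unres_qlen is shown to userspace in packets but stored internally in
 * bytes (NEIGH_VAR_QUEUE_LEN_BYTES): reads divide the byte limit by
 * SKB_TRUESIZE(ETH_FRAME_LEN), and writes multiply the packet count
 * back into bytes, so writing N stores N * SKB_TRUESIZE(ETH_FRAME_LEN).
 */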
3550static int proc_unres_qlen(struct ctl_table *ctl, int write,
3551 void *buffer, size_t *lenp, loff_t *ppos)
3552{
3553 int size, ret;
3554 struct ctl_table tmp = *ctl;
3555
3556 tmp.extra1 = SYSCTL_ZERO;
3557 tmp.extra2 = &unres_qlen_max;
3558 tmp.data = &size;
3559
3560 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3561 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3562
3563 if (write && !ret)
3564 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3565 return ret;
3566}
3567
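/*
 * Propagate a newly written "default" value to every device whose own
 * copy of the field has not been explicitly overridden (tracked by the
 * per-field data_state bits).
 */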
3568static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3569 int index)
3570{
3571 struct net_device *dev;
3572 int family = neigh_parms_family(p);
3573
3574 rcu_read_lock();
3575 for_each_netdev_rcu(net, dev) {
3576 struct neigh_parms *dst_p =
3577 neigh_get_dev_parms_rcu(dev, family);
3578
3579 if (dst_p && !test_bit(index, dst_p->data_state))
3580 dst_p->data[index] = p->data[index];
3581 }
3582 rcu_read_unlock();
3583}
3584
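/*
 * Common post-write hook for the sysctl handlers below: remember that
 * the field was set explicitly, notify listeners when DELAY_PROBE_TIME
 * changes, and push writes to the "default" parms out to all devices.
 */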
3585static void neigh_proc_update(struct ctl_table *ctl, int write)
3586{
3587 struct net_device *dev = ctl->extra1;
3588 struct neigh_parms *p = ctl->extra2;
3589 struct net *net = neigh_parms_net(p);
3590 int index = (int *) ctl->data - p->data;
3591
3592 if (!write)
3593 return;
3594
3595 set_bit(index, p->data_state);
3596 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3597 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3598 if (!dev) /* NULL dev means this is the default value */
3599 neigh_copy_dflt_parms(net, p, index);
3600}
3601
3602static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3603 void *buffer, size_t *lenp,
3604 loff_t *ppos)
3605{
3606 struct ctl_table tmp = *ctl;
3607 int ret;
3608
3609 tmp.extra1 = SYSCTL_ZERO;
3610 tmp.extra2 = SYSCTL_INT_MAX;
3611
3612 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3613 neigh_proc_update(ctl, write);
3614 return ret;
3615}
3616
3617static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
3618 void *buffer, size_t *lenp, loff_t *ppos)
3619{
3620 struct ctl_table tmp = *ctl;
3621 int ret;
3622
3623 int min = msecs_to_jiffies(1);
3624
3625 tmp.extra1 = &min;
3626 tmp.extra2 = NULL;
3627
3628 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3629 neigh_proc_update(ctl, write);
3630 return ret;
3631}
3632
3633int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3634 size_t *lenp, loff_t *ppos)
3635{
3636 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3637
3638 neigh_proc_update(ctl, write);
3639 return ret;
3640}
3641EXPORT_SYMBOL(neigh_proc_dointvec);
3642
3643int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3644 size_t *lenp, loff_t *ppos)
3645{
3646 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3647
3648 neigh_proc_update(ctl, write);
3649 return ret;
3650}
3651EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3652
3653static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3654 void *buffer, size_t *lenp,
3655 loff_t *ppos)
3656{
3657 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3658
3659 neigh_proc_update(ctl, write);
3660 return ret;
3661}
3662
3663int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3664 void *buffer, size_t *lenp, loff_t *ppos)
3665{
3666 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3667
3668 neigh_proc_update(ctl, write);
3669 return ret;
3670}
3671EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3672
3673static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3674 void *buffer, size_t *lenp,
3675 loff_t *ppos)
3676{
3677 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3678
3679 neigh_proc_update(ctl, write);
3680 return ret;
3681}
3682
3683static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3684 void *buffer, size_t *lenp,
3685 loff_t *ppos)
3686{
3687 struct neigh_parms *p = ctl->extra2;
3688 int ret;
3689
3690 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3691 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3692 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3693 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3694 else
3695 ret = -1;
3696
3697 if (write && ret == 0) {
3698 /* update reachable_time as well; otherwise the change will
3699 * only take effect the next time neigh_periodic_work
3700 * decides to recompute it
3701 */
3702 p->reachable_time =
3703 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3704 }
3705 return ret;
3706}
3707
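/*
 * Each entry in the template below stores, in ->data, the offset of the
 * corresponding field within struct neigh_parms (computed from a NULL
 * base pointer).  neigh_sysctl_register() later turns that offset into
 * a real address by adding the parms pointer to ->data.
 */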
3708#define NEIGH_PARMS_DATA_OFFSET(index) \
3709 (&((struct neigh_parms *) 0)->data[index])
3710
3711#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3712 [NEIGH_VAR_ ## attr] = { \
3713 .procname = name, \
3714 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3715 .maxlen = sizeof(int), \
3716 .mode = mval, \
3717 .proc_handler = proc, \
3718 }
3719
3720#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3721 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3722
3723#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3724 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3725
3726#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3727 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3728
3729#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3730 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3731
3732#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3733 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3734
3735#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3736 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3737
3738static struct neigh_sysctl_table {
3739 struct ctl_table_header *sysctl_header;
3740 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3741} neigh_sysctl_template __read_mostly = {
3742 .neigh_vars = {
3743 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3744 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3745 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3746 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3747 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3748 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3749 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3750 NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3751 "interval_probe_time_ms"),
3752 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3753 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3754 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3755 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3756 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3757 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3758 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3759 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3760 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3761 [NEIGH_VAR_GC_INTERVAL] = {
3762 .procname = "gc_interval",
3763 .maxlen = sizeof(int),
3764 .mode = 0644,
3765 .proc_handler = proc_dointvec_jiffies,
3766 },
3767 [NEIGH_VAR_GC_THRESH1] = {
3768 .procname = "gc_thresh1",
3769 .maxlen = sizeof(int),
3770 .mode = 0644,
3771 .extra1 = SYSCTL_ZERO,
3772 .extra2 = SYSCTL_INT_MAX,
3773 .proc_handler = proc_dointvec_minmax,
3774 },
3775 [NEIGH_VAR_GC_THRESH2] = {
3776 .procname = "gc_thresh2",
3777 .maxlen = sizeof(int),
3778 .mode = 0644,
3779 .extra1 = SYSCTL_ZERO,
3780 .extra2 = SYSCTL_INT_MAX,
3781 .proc_handler = proc_dointvec_minmax,
3782 },
3783 [NEIGH_VAR_GC_THRESH3] = {
3784 .procname = "gc_thresh3",
3785 .maxlen = sizeof(int),
3786 .mode = 0644,
3787 .extra1 = SYSCTL_ZERO,
3788 .extra2 = SYSCTL_INT_MAX,
3789 .proc_handler = proc_dointvec_minmax,
3790 },
3791 {},
3792 },
3793};
3794
3795int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3796 proc_handler *handler)
3797{
3798 int i;
3799 struct neigh_sysctl_table *t;
3800 const char *dev_name_source;
3801 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3802 char *p_name;
3803
3804 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3805 if (!t)
3806 goto err;
3807
3808 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3809 t->neigh_vars[i].data += (long) p;
3810 t->neigh_vars[i].extra1 = dev;
3811 t->neigh_vars[i].extra2 = p;
3812 }
3813
3814 if (dev) {
3815 dev_name_source = dev->name;
3816 /* Terminate the table early */
3817 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3818 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3819 } else {
3820 struct neigh_table *tbl = p->tbl;
3821 dev_name_source = "default";
3822 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3823 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3824 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3825 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3826 }
3827
3828 if (handler) {
3829 /* RetransTime */
3830 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3831 /* ReachableTime */
3832 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3833 /* RetransTime (in milliseconds) */
3834 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3835 /* ReachableTime (in milliseconds) */
3836 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3837 } else {
3838 /* These handlers update p->reachable_time after
3839 * base_reachable_time(_ms) is written, so the new interval takes
3840 * effect at the next neighbour update instead of waiting for
3841 * neigh_periodic_work to recompute it (which can take several minutes).
3842 * Any handler that replaces them should do the same.
3843 */
3844 /* ReachableTime */
3845 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3846 neigh_proc_base_reachable_time;
3847 /* ReachableTime (in milliseconds) */
3848 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3849 neigh_proc_base_reachable_time;
3850 }
3851
3852 switch (neigh_parms_family(p)) {
3853 case AF_INET:
3854 p_name = "ipv4";
3855 break;
3856 case AF_INET6:
3857 p_name = "ipv6";
3858 break;
3859 default:
3860 BUG();
3861 }
3862
3863 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3864 p_name, dev_name_source);
3865 t->sysctl_header =
3866 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3867 if (!t->sysctl_header)
3868 goto free;
3869
3870 p->sysctl_table = t;
3871 return 0;
3872
3873free:
3874 kfree(t);
3875err:
3876 return -ENOBUFS;
3877}
3878EXPORT_SYMBOL(neigh_sysctl_register);
3879
3880void neigh_sysctl_unregister(struct neigh_parms *p)
3881{
3882 if (p->sysctl_table) {
3883 struct neigh_sysctl_table *t = p->sysctl_table;
3884 p->sysctl_table = NULL;
3885 unregister_net_sysctl_table(t->sysctl_header);
3886 kfree(t);
3887 }
3888}
3889EXPORT_SYMBOL(neigh_sysctl_unregister);
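
/*
 * Usage sketch (illustrative only): protocols typically register the
 * per-device sysctls when attaching neigh_parms to a device and drop
 * them again on teardown.  "my_parms" is a hypothetical name; passing a
 * NULL handler keeps the base_reachable_time handlers installed by
 * neigh_sysctl_register() itself:
 *
 *	if (neigh_sysctl_register(dev, my_parms, NULL))
 *		return -ENOBUFS;
 *	...
 *	neigh_sysctl_unregister(my_parms);
 */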
3890
3891#endif /* CONFIG_SYSCTL */
3892
3893static int __init neigh_init(void)
3894{
3895 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3896 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3897 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3898
3899 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3900 0);
3901 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3902
3903 return 0;
3904}
3905
3906subsys_initcall(neigh_init);