1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/slab.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/socket.h>
25#include <linux/netdevice.h>
26#include <linux/proc_fs.h>
27#ifdef CONFIG_SYSCTL
28#include <linux/sysctl.h>
29#endif
30#include <linux/times.h>
31#include <net/net_namespace.h>
32#include <net/neighbour.h>
33#include <net/dst.h>
34#include <net/sock.h>
35#include <net/netevent.h>
36#include <net/netlink.h>
37#include <linux/rtnetlink.h>
38#include <linux/random.h>
39#include <linux/string.h>
40#include <linux/log2.h>
41
42#define NEIGH_DEBUG 1
43
44#define NEIGH_PRINTK(x...) printk(x)
45#define NEIGH_NOPRINTK(x...) do { ; } while(0)
46#define NEIGH_PRINTK1 NEIGH_NOPRINTK
47#define NEIGH_PRINTK2 NEIGH_NOPRINTK
48
49#if NEIGH_DEBUG >= 1
50#undef NEIGH_PRINTK1
51#define NEIGH_PRINTK1 NEIGH_PRINTK
52#endif
53#if NEIGH_DEBUG >= 2
54#undef NEIGH_PRINTK2
55#define NEIGH_PRINTK2 NEIGH_PRINTK
56#endif
57
58#define PNEIGH_HASHMASK 0xF
59
60static void neigh_timer_handler(unsigned long arg);
61static void __neigh_notify(struct neighbour *n, int type, int flags);
62static void neigh_update_notify(struct neighbour *neigh);
63static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
64
65static struct neigh_table *neigh_tables;
66#ifdef CONFIG_PROC_FS
67static const struct file_operations neigh_stat_seq_fops;
68#endif
69
70/*
71 Neighbour hash table buckets are protected with rwlock tbl->lock.
72
73 - All the scans/updates to hash buckets MUST be made under this lock.
 74   - NOTHING clever should be done under this lock: no callbacks
 75     to protocol backends, no attempts to send anything to the network.
 76     Doing so will result in deadlocks if the backend/driver wants to use
 77     the neighbour cache.
78 - If the entry requires some non-trivial actions, increase
79 its reference count and release table lock.
80
81 Neighbour entries are protected:
82 - with reference count.
83 - with rwlock neigh->lock
84
85 Reference count prevents destruction.
86
87 neigh->lock mainly serializes ll address data and its validity state.
 88   However, the same lock is used to protect other entry fields:
89 - timer
90 - resolution queue
91
 92   Again, nothing clever shall be done under neigh->lock;
 93   the most complicated procedure we allow is dev->hard_header.
 94   dev->hard_header is assumed to be simple and not to make
 95   callbacks into the neighbour tables.
 96
 97   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
 98   the list of neighbour tables. This list is used only in process context.
99 */
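
/*
 * Illustrative sketch (not built): the "take a reference, then drop the
 * table lock" rule described above.  example_defer_work() is a hypothetical
 * name used only for illustration.
 */
#if 0
static void example_defer_work(struct neigh_table *tbl, struct neighbour *n)
{
	/* Entered with tbl->lock write-held, n found during a bucket scan. */
	neigh_hold(n);			/* pin the entry under tbl->lock     */
	write_unlock_bh(&tbl->lock);	/* ... then drop the table lock      */

	/* Non-trivial work (driver callbacks, transmits) is allowed here. */
	neigh_event_send(n, NULL);

	neigh_release(n);		/* drop our reference                */
	write_lock_bh(&tbl->lock);	/* re-acquire if the scan continues  */
}
#endif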
100
101static DEFINE_RWLOCK(neigh_tbl_lock);
102
103static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
104{
105 kfree_skb(skb);
106 return -ENETDOWN;
107}
108
109static void neigh_cleanup_and_release(struct neighbour *neigh)
110{
111 if (neigh->parms->neigh_cleanup)
112 neigh->parms->neigh_cleanup(neigh);
113
114 __neigh_notify(neigh, RTM_DELNEIGH, 0);
115 neigh_release(neigh);
116}
117
118/*
119 * The result is randomly distributed in the interval (1/2)*base...(3/2)*base.
120 * It corresponds to the default IPv6 settings and is not overridable,
121 * because it is a really reasonable choice.
122 */
123
124unsigned long neigh_rand_reach_time(unsigned long base)
125{
126 return base ? (net_random() % base) + (base >> 1) : 0;
127}
128EXPORT_SYMBOL(neigh_rand_reach_time);
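
/*
 * Sketch of the resulting range (illustrative only, not built): with the
 * usual IPv6 default of base = 30 * HZ, the returned value always lies in
 * [15 * HZ, 45 * HZ).
 */
#if 0
static void example_reach_time_range(void)
{
	unsigned long t = neigh_rand_reach_time(30 * HZ);

	/* net_random() % base is in [0, base); adding base/2 gives
	 * [base/2, 3*base/2).
	 */
	WARN_ON(t < 15 * HZ || t >= 45 * HZ);
}
#endif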
129
130
131static int neigh_forced_gc(struct neigh_table *tbl)
132{
133 int shrunk = 0;
134 int i;
135 struct neigh_hash_table *nht;
136
137 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
138
139 write_lock_bh(&tbl->lock);
140 nht = rcu_dereference_protected(tbl->nht,
141 lockdep_is_held(&tbl->lock));
142 for (i = 0; i < (1 << nht->hash_shift); i++) {
143 struct neighbour *n;
144 struct neighbour __rcu **np;
145
146 np = &nht->hash_buckets[i];
147 while ((n = rcu_dereference_protected(*np,
148 lockdep_is_held(&tbl->lock))) != NULL) {
149 /* Neighbour record may be discarded if:
150 * - nobody refers to it.
151 * - it is not permanent
152 */
153 write_lock(&n->lock);
154 if (atomic_read(&n->refcnt) == 1 &&
155 !(n->nud_state & NUD_PERMANENT)) {
156 rcu_assign_pointer(*np,
157 rcu_dereference_protected(n->next,
158 lockdep_is_held(&tbl->lock)));
159 n->dead = 1;
160 shrunk = 1;
161 write_unlock(&n->lock);
162 neigh_cleanup_and_release(n);
163 continue;
164 }
165 write_unlock(&n->lock);
166 np = &n->next;
167 }
168 }
169
170 tbl->last_flush = jiffies;
171
172 write_unlock_bh(&tbl->lock);
173
174 return shrunk;
175}
176
177static void neigh_add_timer(struct neighbour *n, unsigned long when)
178{
179 neigh_hold(n);
180 if (unlikely(mod_timer(&n->timer, when))) {
 181		pr_err("NEIGH: BUG, double timer add, state is %x\n",
 182		       n->nud_state);
183 dump_stack();
184 }
185}
186
187static int neigh_del_timer(struct neighbour *n)
188{
189 if ((n->nud_state & NUD_IN_TIMER) &&
190 del_timer(&n->timer)) {
191 neigh_release(n);
192 return 1;
193 }
194 return 0;
195}
196
197static void pneigh_queue_purge(struct sk_buff_head *list)
198{
199 struct sk_buff *skb;
200
201 while ((skb = skb_dequeue(list)) != NULL) {
202 dev_put(skb->dev);
203 kfree_skb(skb);
204 }
205}
206
207static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
208{
209 int i;
210 struct neigh_hash_table *nht;
211
212 nht = rcu_dereference_protected(tbl->nht,
213 lockdep_is_held(&tbl->lock));
214
215 for (i = 0; i < (1 << nht->hash_shift); i++) {
216 struct neighbour *n;
217 struct neighbour __rcu **np = &nht->hash_buckets[i];
218
219 while ((n = rcu_dereference_protected(*np,
220 lockdep_is_held(&tbl->lock))) != NULL) {
221 if (dev && n->dev != dev) {
222 np = &n->next;
223 continue;
224 }
225 rcu_assign_pointer(*np,
226 rcu_dereference_protected(n->next,
227 lockdep_is_held(&tbl->lock)));
228 write_lock(&n->lock);
229 neigh_del_timer(n);
230 n->dead = 1;
231
232 if (atomic_read(&n->refcnt) != 1) {
233 /* The most unpleasant situation.
234 We must destroy neighbour entry,
235 but someone still uses it.
236
237 The destroy will be delayed until
238 the last user releases us, but
 239				   we must kill timers etc. and move
 240				   it to a safe state.
241 */
242 skb_queue_purge(&n->arp_queue);
243 n->arp_queue_len_bytes = 0;
244 n->output = neigh_blackhole;
245 if (n->nud_state & NUD_VALID)
246 n->nud_state = NUD_NOARP;
247 else
248 n->nud_state = NUD_NONE;
249 NEIGH_PRINTK2("neigh %p is stray.\n", n);
250 }
251 write_unlock(&n->lock);
252 neigh_cleanup_and_release(n);
253 }
254 }
255}
256
257void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
258{
259 write_lock_bh(&tbl->lock);
260 neigh_flush_dev(tbl, dev);
261 write_unlock_bh(&tbl->lock);
262}
263EXPORT_SYMBOL(neigh_changeaddr);
264
265int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
266{
267 write_lock_bh(&tbl->lock);
268 neigh_flush_dev(tbl, dev);
269 pneigh_ifdown(tbl, dev);
270 write_unlock_bh(&tbl->lock);
271
272 del_timer_sync(&tbl->proxy_timer);
273 pneigh_queue_purge(&tbl->proxy_queue);
274 return 0;
275}
276EXPORT_SYMBOL(neigh_ifdown);
277
278static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
279{
280 struct neighbour *n = NULL;
281 unsigned long now = jiffies;
282 int entries;
283
284 entries = atomic_inc_return(&tbl->entries) - 1;
285 if (entries >= tbl->gc_thresh3 ||
286 (entries >= tbl->gc_thresh2 &&
287 time_after(now, tbl->last_flush + 5 * HZ))) {
288 if (!neigh_forced_gc(tbl) &&
289 entries >= tbl->gc_thresh3)
290 goto out_entries;
291 }
292
293 if (tbl->entry_size)
294 n = kzalloc(tbl->entry_size, GFP_ATOMIC);
295 else {
296 int sz = sizeof(*n) + tbl->key_len;
297
298 sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
299 sz += dev->neigh_priv_len;
300 n = kzalloc(sz, GFP_ATOMIC);
301 }
302 if (!n)
303 goto out_entries;
304
305 skb_queue_head_init(&n->arp_queue);
306 rwlock_init(&n->lock);
307 seqlock_init(&n->ha_lock);
308 n->updated = n->used = now;
309 n->nud_state = NUD_NONE;
310 n->output = neigh_blackhole;
311 seqlock_init(&n->hh.hh_lock);
312 n->parms = neigh_parms_clone(&tbl->parms);
313 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
314
315 NEIGH_CACHE_STAT_INC(tbl, allocs);
316 n->tbl = tbl;
317 atomic_set(&n->refcnt, 1);
318 n->dead = 1;
319out:
320 return n;
321
322out_entries:
323 atomic_dec(&tbl->entries);
324 goto out;
325}
326
327static void neigh_get_hash_rnd(u32 *x)
328{
329 get_random_bytes(x, sizeof(*x));
330 *x |= 1;
331}
332
333static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
334{
335 size_t size = (1 << shift) * sizeof(struct neighbour *);
336 struct neigh_hash_table *ret;
337 struct neighbour __rcu **buckets;
338 int i;
339
340 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
341 if (!ret)
342 return NULL;
343 if (size <= PAGE_SIZE)
344 buckets = kzalloc(size, GFP_ATOMIC);
345 else
346 buckets = (struct neighbour __rcu **)
347 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
348 get_order(size));
349 if (!buckets) {
350 kfree(ret);
351 return NULL;
352 }
353 ret->hash_buckets = buckets;
354 ret->hash_shift = shift;
355 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
356 neigh_get_hash_rnd(&ret->hash_rnd[i]);
357 return ret;
358}
359
360static void neigh_hash_free_rcu(struct rcu_head *head)
361{
362 struct neigh_hash_table *nht = container_of(head,
363 struct neigh_hash_table,
364 rcu);
365 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
366 struct neighbour __rcu **buckets = nht->hash_buckets;
367
368 if (size <= PAGE_SIZE)
369 kfree(buckets);
370 else
371 free_pages((unsigned long)buckets, get_order(size));
372 kfree(nht);
373}
374
375static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
376 unsigned long new_shift)
377{
378 unsigned int i, hash;
379 struct neigh_hash_table *new_nht, *old_nht;
380
381 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
382
383 old_nht = rcu_dereference_protected(tbl->nht,
384 lockdep_is_held(&tbl->lock));
385 new_nht = neigh_hash_alloc(new_shift);
386 if (!new_nht)
387 return old_nht;
388
389 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
390 struct neighbour *n, *next;
391
392 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
393 lockdep_is_held(&tbl->lock));
394 n != NULL;
395 n = next) {
396 hash = tbl->hash(n->primary_key, n->dev,
397 new_nht->hash_rnd);
398
399 hash >>= (32 - new_nht->hash_shift);
400 next = rcu_dereference_protected(n->next,
401 lockdep_is_held(&tbl->lock));
402
403 rcu_assign_pointer(n->next,
404 rcu_dereference_protected(
405 new_nht->hash_buckets[hash],
406 lockdep_is_held(&tbl->lock)));
407 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
408 }
409 }
410
411 rcu_assign_pointer(tbl->nht, new_nht);
412 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
413 return new_nht;
414}
415
416struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
417 struct net_device *dev)
418{
419 struct neighbour *n;
420 int key_len = tbl->key_len;
421 u32 hash_val;
422 struct neigh_hash_table *nht;
423
424 NEIGH_CACHE_STAT_INC(tbl, lookups);
425
426 rcu_read_lock_bh();
427 nht = rcu_dereference_bh(tbl->nht);
428 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
429
430 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
431 n != NULL;
432 n = rcu_dereference_bh(n->next)) {
433 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
434 if (!atomic_inc_not_zero(&n->refcnt))
435 n = NULL;
436 NEIGH_CACHE_STAT_INC(tbl, hits);
437 break;
438 }
439 }
440
441 rcu_read_unlock_bh();
442 return n;
443}
444EXPORT_SYMBOL(neigh_lookup);
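
/*
 * Usage sketch (illustrative only, not built): neigh_lookup() returns the
 * entry with its reference count already taken, so a successful lookup must
 * be paired with neigh_release().  The IPv4-style key is an assumption made
 * for the example.
 */
#if 0
static int example_lookup_usage(struct neigh_table *tbl, const __be32 *addr,
				struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, addr, dev);

	if (!n)
		return -ENOENT;

	read_lock_bh(&n->lock);
	pr_debug("neigh %p nud_state %x\n", n, n->nud_state);
	read_unlock_bh(&n->lock);

	neigh_release(n);	/* drop the reference taken by the lookup */
	return 0;
}
#endif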
445
446struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
447 const void *pkey)
448{
449 struct neighbour *n;
450 int key_len = tbl->key_len;
451 u32 hash_val;
452 struct neigh_hash_table *nht;
453
454 NEIGH_CACHE_STAT_INC(tbl, lookups);
455
456 rcu_read_lock_bh();
457 nht = rcu_dereference_bh(tbl->nht);
458 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
459
460 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
461 n != NULL;
462 n = rcu_dereference_bh(n->next)) {
463 if (!memcmp(n->primary_key, pkey, key_len) &&
464 net_eq(dev_net(n->dev), net)) {
465 if (!atomic_inc_not_zero(&n->refcnt))
466 n = NULL;
467 NEIGH_CACHE_STAT_INC(tbl, hits);
468 break;
469 }
470 }
471
472 rcu_read_unlock_bh();
473 return n;
474}
475EXPORT_SYMBOL(neigh_lookup_nodev);
476
477struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
478 struct net_device *dev)
479{
480 u32 hash_val;
481 int key_len = tbl->key_len;
482 int error;
483 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
484 struct neigh_hash_table *nht;
485
486 if (!n) {
487 rc = ERR_PTR(-ENOBUFS);
488 goto out;
489 }
490
491 memcpy(n->primary_key, pkey, key_len);
492 n->dev = dev;
493 dev_hold(dev);
494
495 /* Protocol specific setup. */
496 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
497 rc = ERR_PTR(error);
498 goto out_neigh_release;
499 }
500
501 if (dev->netdev_ops->ndo_neigh_construct) {
502 error = dev->netdev_ops->ndo_neigh_construct(n);
503 if (error < 0) {
504 rc = ERR_PTR(error);
505 goto out_neigh_release;
506 }
507 }
508
509 /* Device specific setup. */
510 if (n->parms->neigh_setup &&
511 (error = n->parms->neigh_setup(n)) < 0) {
512 rc = ERR_PTR(error);
513 goto out_neigh_release;
514 }
515
516 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
517
518 write_lock_bh(&tbl->lock);
519 nht = rcu_dereference_protected(tbl->nht,
520 lockdep_is_held(&tbl->lock));
521
522 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
523 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
524
525 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
526
527 if (n->parms->dead) {
528 rc = ERR_PTR(-EINVAL);
529 goto out_tbl_unlock;
530 }
531
532 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
533 lockdep_is_held(&tbl->lock));
534 n1 != NULL;
535 n1 = rcu_dereference_protected(n1->next,
536 lockdep_is_held(&tbl->lock))) {
537 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
538 neigh_hold(n1);
539 rc = n1;
540 goto out_tbl_unlock;
541 }
542 }
543
544 n->dead = 0;
545 neigh_hold(n);
546 rcu_assign_pointer(n->next,
547 rcu_dereference_protected(nht->hash_buckets[hash_val],
548 lockdep_is_held(&tbl->lock)));
549 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
550 write_unlock_bh(&tbl->lock);
551 NEIGH_PRINTK2("neigh %p is created.\n", n);
552 rc = n;
553out:
554 return rc;
555out_tbl_unlock:
556 write_unlock_bh(&tbl->lock);
557out_neigh_release:
558 neigh_release(n);
559 goto out;
560}
561EXPORT_SYMBOL(neigh_create);
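
/*
 * Usage sketch (illustrative only, not built): the usual lookup-or-create
 * pattern built on the two functions above; __neigh_lookup_errno() in
 * <net/neighbour.h> wraps exactly this sequence.
 */
#if 0
static struct neighbour *example_lookup_or_create(struct neigh_table *tbl,
						  const void *pkey,
						  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;			/* reference already held  */

	return neigh_create(tbl, pkey, dev);	/* ERR_PTR() on failure    */
}
#endif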
562
563static u32 pneigh_hash(const void *pkey, int key_len)
564{
565 u32 hash_val = *(u32 *)(pkey + key_len - 4);
566 hash_val ^= (hash_val >> 16);
567 hash_val ^= hash_val >> 8;
568 hash_val ^= hash_val >> 4;
569 hash_val &= PNEIGH_HASHMASK;
570 return hash_val;
571}
572
573static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
574 struct net *net,
575 const void *pkey,
576 int key_len,
577 struct net_device *dev)
578{
579 while (n) {
580 if (!memcmp(n->key, pkey, key_len) &&
581 net_eq(pneigh_net(n), net) &&
582 (n->dev == dev || !n->dev))
583 return n;
584 n = n->next;
585 }
586 return NULL;
587}
588
589struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
590 struct net *net, const void *pkey, struct net_device *dev)
591{
592 int key_len = tbl->key_len;
593 u32 hash_val = pneigh_hash(pkey, key_len);
594
595 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
596 net, pkey, key_len, dev);
597}
598EXPORT_SYMBOL_GPL(__pneigh_lookup);
599
600struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
601 struct net *net, const void *pkey,
602 struct net_device *dev, int creat)
603{
604 struct pneigh_entry *n;
605 int key_len = tbl->key_len;
606 u32 hash_val = pneigh_hash(pkey, key_len);
607
608 read_lock_bh(&tbl->lock);
609 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
610 net, pkey, key_len, dev);
611 read_unlock_bh(&tbl->lock);
612
613 if (n || !creat)
614 goto out;
615
616 ASSERT_RTNL();
617
618 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
619 if (!n)
620 goto out;
621
622 write_pnet(&n->net, hold_net(net));
623 memcpy(n->key, pkey, key_len);
624 n->dev = dev;
625 if (dev)
626 dev_hold(dev);
627
628 if (tbl->pconstructor && tbl->pconstructor(n)) {
629 if (dev)
630 dev_put(dev);
631 release_net(net);
632 kfree(n);
633 n = NULL;
634 goto out;
635 }
636
637 write_lock_bh(&tbl->lock);
638 n->next = tbl->phash_buckets[hash_val];
639 tbl->phash_buckets[hash_val] = n;
640 write_unlock_bh(&tbl->lock);
641out:
642 return n;
643}
644EXPORT_SYMBOL(pneigh_lookup);
645
646
647int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
648 struct net_device *dev)
649{
650 struct pneigh_entry *n, **np;
651 int key_len = tbl->key_len;
652 u32 hash_val = pneigh_hash(pkey, key_len);
653
654 write_lock_bh(&tbl->lock);
655 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
656 np = &n->next) {
657 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
658 net_eq(pneigh_net(n), net)) {
659 *np = n->next;
660 write_unlock_bh(&tbl->lock);
661 if (tbl->pdestructor)
662 tbl->pdestructor(n);
663 if (n->dev)
664 dev_put(n->dev);
665 release_net(pneigh_net(n));
666 kfree(n);
667 return 0;
668 }
669 }
670 write_unlock_bh(&tbl->lock);
671 return -ENOENT;
672}
673
674static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
675{
676 struct pneigh_entry *n, **np;
677 u32 h;
678
679 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
680 np = &tbl->phash_buckets[h];
681 while ((n = *np) != NULL) {
682 if (!dev || n->dev == dev) {
683 *np = n->next;
684 if (tbl->pdestructor)
685 tbl->pdestructor(n);
686 if (n->dev)
687 dev_put(n->dev);
688 release_net(pneigh_net(n));
689 kfree(n);
690 continue;
691 }
692 np = &n->next;
693 }
694 }
695 return -ENOENT;
696}
697
698static void neigh_parms_destroy(struct neigh_parms *parms);
699
700static inline void neigh_parms_put(struct neigh_parms *parms)
701{
702 if (atomic_dec_and_test(&parms->refcnt))
703 neigh_parms_destroy(parms);
704}
705
706/*
707 *	neighbour must already be out of the table.
708 *
709 */
710void neigh_destroy(struct neighbour *neigh)
711{
712 struct net_device *dev = neigh->dev;
713
714 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
715
716 if (!neigh->dead) {
717 pr_warn("Destroying alive neighbour %p\n", neigh);
718 dump_stack();
719 return;
720 }
721
722 if (neigh_del_timer(neigh))
723 pr_warn("Impossible event\n");
724
725 skb_queue_purge(&neigh->arp_queue);
726 neigh->arp_queue_len_bytes = 0;
727
728 if (dev->netdev_ops->ndo_neigh_destroy)
729 dev->netdev_ops->ndo_neigh_destroy(neigh);
730
731 dev_put(dev);
732 neigh_parms_put(neigh->parms);
733
734 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
735
736 atomic_dec(&neigh->tbl->entries);
737 kfree_rcu(neigh, rcu);
738}
739EXPORT_SYMBOL(neigh_destroy);
740
741/* Neighbour state is suspicious;
742 disable fast path.
743
744 Called with write_locked neigh.
745 */
746static void neigh_suspect(struct neighbour *neigh)
747{
748 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
749
750 neigh->output = neigh->ops->output;
751}
752
753/* Neighbour state is OK;
754 enable fast path.
755
756 Called with write_locked neigh.
757 */
758static void neigh_connect(struct neighbour *neigh)
759{
760 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
761
762 neigh->output = neigh->ops->connected_output;
763}
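
/*
 * Sketch (illustrative only, not built) of how a protocol's neigh_ops wires
 * the two paths toggled by neigh_suspect()/neigh_connect() above.  The field
 * names follow struct neigh_ops; arp_generic_ops in net/ipv4/arp.c is a real
 * in-tree example.
 */
#if 0
static const struct neigh_ops example_neigh_ops = {
	.family			= AF_INET,
	.solicit		= NULL,	/* protocol's solicitation routine */
	.error_report		= NULL,	/* protocol's unreachable handler  */
	.output			= neigh_resolve_output,		/* slow path */
	.connected_output	= neigh_connected_output,	/* fast path */
};
#endif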
764
765static void neigh_periodic_work(struct work_struct *work)
766{
767 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
768 struct neighbour *n;
769 struct neighbour __rcu **np;
770 unsigned int i;
771 struct neigh_hash_table *nht;
772
773 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
774
775 write_lock_bh(&tbl->lock);
776 nht = rcu_dereference_protected(tbl->nht,
777 lockdep_is_held(&tbl->lock));
778
779 /*
780 * periodically recompute ReachableTime from random function
781 */
782
783 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
784 struct neigh_parms *p;
785 tbl->last_rand = jiffies;
786 for (p = &tbl->parms; p; p = p->next)
787 p->reachable_time =
788 neigh_rand_reach_time(p->base_reachable_time);
789 }
790
791 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
792 np = &nht->hash_buckets[i];
793
794 while ((n = rcu_dereference_protected(*np,
795 lockdep_is_held(&tbl->lock))) != NULL) {
796 unsigned int state;
797
798 write_lock(&n->lock);
799
800 state = n->nud_state;
801 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
802 write_unlock(&n->lock);
803 goto next_elt;
804 }
805
806 if (time_before(n->used, n->confirmed))
807 n->used = n->confirmed;
808
809 if (atomic_read(&n->refcnt) == 1 &&
810 (state == NUD_FAILED ||
811 time_after(jiffies, n->used + n->parms->gc_staletime))) {
812 *np = n->next;
813 n->dead = 1;
814 write_unlock(&n->lock);
815 neigh_cleanup_and_release(n);
816 continue;
817 }
818 write_unlock(&n->lock);
819
820next_elt:
821 np = &n->next;
822 }
823 /*
824 * It's fine to release lock here, even if hash table
825 * grows while we are preempted.
826 */
827 write_unlock_bh(&tbl->lock);
828 cond_resched();
829 write_lock_bh(&tbl->lock);
830 nht = rcu_dereference_protected(tbl->nht,
831 lockdep_is_held(&tbl->lock));
832 }
833 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
834 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
835 * base_reachable_time.
836 */
837 schedule_delayed_work(&tbl->gc_work,
838 tbl->parms.base_reachable_time >> 1);
839 write_unlock_bh(&tbl->lock);
840}
841
842static __inline__ int neigh_max_probes(struct neighbour *n)
843{
844 struct neigh_parms *p = n->parms;
845 return (n->nud_state & NUD_PROBE) ?
846 p->ucast_probes :
847 p->ucast_probes + p->app_probes + p->mcast_probes;
848}
849
850static void neigh_invalidate(struct neighbour *neigh)
851 __releases(neigh->lock)
852 __acquires(neigh->lock)
853{
854 struct sk_buff *skb;
855
856 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
857 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
858 neigh->updated = jiffies;
859
 860	/* This is a very delicate place. report_unreachable is a very
 861	   complicated routine. In particular, it can hit the same neighbour entry!
 862
 863	   So we try to be careful and avoid a dead loop. --ANK
864 */
865 while (neigh->nud_state == NUD_FAILED &&
866 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
867 write_unlock(&neigh->lock);
868 neigh->ops->error_report(neigh, skb);
869 write_lock(&neigh->lock);
870 }
871 skb_queue_purge(&neigh->arp_queue);
872 neigh->arp_queue_len_bytes = 0;
873}
874
875static void neigh_probe(struct neighbour *neigh)
876 __releases(neigh->lock)
877{
878 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
879 /* keep skb alive even if arp_queue overflows */
880 if (skb)
881 skb = skb_copy(skb, GFP_ATOMIC);
882 write_unlock(&neigh->lock);
883 neigh->ops->solicit(neigh, skb);
884 atomic_inc(&neigh->probes);
885 kfree_skb(skb);
886}
887
888/* Called when a timer expires for a neighbour entry. */
889
890static void neigh_timer_handler(unsigned long arg)
891{
892 unsigned long now, next;
893 struct neighbour *neigh = (struct neighbour *)arg;
894 unsigned int state;
895 int notify = 0;
896
897 write_lock(&neigh->lock);
898
899 state = neigh->nud_state;
900 now = jiffies;
901 next = now + HZ;
902
903 if (!(state & NUD_IN_TIMER))
904 goto out;
905
906 if (state & NUD_REACHABLE) {
907 if (time_before_eq(now,
908 neigh->confirmed + neigh->parms->reachable_time)) {
909 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
910 next = neigh->confirmed + neigh->parms->reachable_time;
911 } else if (time_before_eq(now,
912 neigh->used + neigh->parms->delay_probe_time)) {
913 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
914 neigh->nud_state = NUD_DELAY;
915 neigh->updated = jiffies;
916 neigh_suspect(neigh);
917 next = now + neigh->parms->delay_probe_time;
918 } else {
919 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
920 neigh->nud_state = NUD_STALE;
921 neigh->updated = jiffies;
922 neigh_suspect(neigh);
923 notify = 1;
924 }
925 } else if (state & NUD_DELAY) {
926 if (time_before_eq(now,
927 neigh->confirmed + neigh->parms->delay_probe_time)) {
928 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
929 neigh->nud_state = NUD_REACHABLE;
930 neigh->updated = jiffies;
931 neigh_connect(neigh);
932 notify = 1;
933 next = neigh->confirmed + neigh->parms->reachable_time;
934 } else {
935 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
936 neigh->nud_state = NUD_PROBE;
937 neigh->updated = jiffies;
938 atomic_set(&neigh->probes, 0);
939 next = now + neigh->parms->retrans_time;
940 }
941 } else {
942 /* NUD_PROBE|NUD_INCOMPLETE */
943 next = now + neigh->parms->retrans_time;
944 }
945
946 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
947 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
948 neigh->nud_state = NUD_FAILED;
949 notify = 1;
950 neigh_invalidate(neigh);
951 }
952
953 if (neigh->nud_state & NUD_IN_TIMER) {
954 if (time_before(next, jiffies + HZ/2))
955 next = jiffies + HZ/2;
956 if (!mod_timer(&neigh->timer, next))
957 neigh_hold(neigh);
958 }
959 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
960 neigh_probe(neigh);
961 } else {
962out:
963 write_unlock(&neigh->lock);
964 }
965
966 if (notify)
967 neigh_update_notify(neigh);
968
969 neigh_release(neigh);
970}
971
972int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
973{
974 int rc;
975 bool immediate_probe = false;
976
977 write_lock_bh(&neigh->lock);
978
979 rc = 0;
980 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
981 goto out_unlock_bh;
982
983 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
984 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
985 unsigned long next, now = jiffies;
986
987 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
988 neigh->nud_state = NUD_INCOMPLETE;
989 neigh->updated = now;
990 next = now + max(neigh->parms->retrans_time, HZ/2);
991 neigh_add_timer(neigh, next);
992 immediate_probe = true;
993 } else {
994 neigh->nud_state = NUD_FAILED;
995 neigh->updated = jiffies;
996 write_unlock_bh(&neigh->lock);
997
998 kfree_skb(skb);
999 return 1;
1000 }
1001 } else if (neigh->nud_state & NUD_STALE) {
1002 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
1003 neigh->nud_state = NUD_DELAY;
1004 neigh->updated = jiffies;
1005 neigh_add_timer(neigh,
1006 jiffies + neigh->parms->delay_probe_time);
1007 }
1008
1009 if (neigh->nud_state == NUD_INCOMPLETE) {
1010 if (skb) {
1011 while (neigh->arp_queue_len_bytes + skb->truesize >
1012 neigh->parms->queue_len_bytes) {
1013 struct sk_buff *buff;
1014
1015 buff = __skb_dequeue(&neigh->arp_queue);
1016 if (!buff)
1017 break;
1018 neigh->arp_queue_len_bytes -= buff->truesize;
1019 kfree_skb(buff);
1020 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1021 }
1022 skb_dst_force(skb);
1023 __skb_queue_tail(&neigh->arp_queue, skb);
1024 neigh->arp_queue_len_bytes += skb->truesize;
1025 }
1026 rc = 1;
1027 }
1028out_unlock_bh:
1029 if (immediate_probe)
1030 neigh_probe(neigh);
1031 else
1032 write_unlock(&neigh->lock);
1033 local_bh_enable();
1034 return rc;
1035}
1036EXPORT_SYMBOL(__neigh_event_send);
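
/*
 * Usage sketch (illustrative only, not built): the return convention of
 * neigh_event_send()/__neigh_event_send().  Zero means the link-layer
 * address is usable right now; non-zero means the skb has been taken over
 * (queued for resolution) or dropped.  neigh_resolve_output() below is the
 * real in-tree user of this pattern and additionally takes ha_lock, which is
 * omitted here for brevity.
 */
#if 0
static int example_try_transmit(struct neighbour *n, struct sk_buff *skb)
{
	if (neigh_event_send(n, skb) == 0) {
		/* lladdr is valid: fill the hardware header and send now */
		if (dev_hard_header(skb, n->dev, ntohs(skb->protocol),
				    n->ha, NULL, skb->len) < 0) {
			kfree_skb(skb);
			return -EINVAL;
		}
		return dev_queue_xmit(skb);
	}
	return 0;	/* queued or dropped while resolution proceeds */
}
#endif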
1037
1038static void neigh_update_hhs(struct neighbour *neigh)
1039{
1040 struct hh_cache *hh;
1041 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1042 = NULL;
1043
1044 if (neigh->dev->header_ops)
1045 update = neigh->dev->header_ops->cache_update;
1046
1047 if (update) {
1048 hh = &neigh->hh;
1049 if (hh->hh_len) {
1050 write_seqlock_bh(&hh->hh_lock);
1051 update(hh, neigh->dev, neigh->ha);
1052 write_sequnlock_bh(&hh->hh_lock);
1053 }
1054 }
1055}
1056
1057
1058
1059/* Generic update routine.
1060   -- lladdr is the new lladdr, or NULL if it is not supplied.
1061   -- new    is the new state.
1062   -- flags
1063	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1064		if it is different.
1065	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
1066		lladdr as suspect instead of overriding it
1067		if it is different.
1068		It also allows the current state to be retained
1069		if lladdr is unchanged.
1070	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1071
1072	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1073		NTF_ROUTER flag.
1074	NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
1075		to be a router.
1076
1077   The caller MUST hold a reference count on the entry.
1078 */
1079
1080int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1081 u32 flags)
1082{
1083 u8 old;
1084 int err;
1085 int notify = 0;
1086 struct net_device *dev;
1087 int update_isrouter = 0;
1088
1089 write_lock_bh(&neigh->lock);
1090
1091 dev = neigh->dev;
1092 old = neigh->nud_state;
1093 err = -EPERM;
1094
1095 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1096 (old & (NUD_NOARP | NUD_PERMANENT)))
1097 goto out;
1098
1099 if (!(new & NUD_VALID)) {
1100 neigh_del_timer(neigh);
1101 if (old & NUD_CONNECTED)
1102 neigh_suspect(neigh);
1103 neigh->nud_state = new;
1104 err = 0;
1105 notify = old & NUD_VALID;
1106 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1107 (new & NUD_FAILED)) {
1108 neigh_invalidate(neigh);
1109 notify = 1;
1110 }
1111 goto out;
1112 }
1113
1114 /* Compare new lladdr with cached one */
1115 if (!dev->addr_len) {
1116 /* First case: device needs no address. */
1117 lladdr = neigh->ha;
1118 } else if (lladdr) {
1119 /* The second case: if something is already cached
1120 and a new address is proposed:
1121 - compare new & old
1122 - if they are different, check override flag
1123 */
1124 if ((old & NUD_VALID) &&
1125 !memcmp(lladdr, neigh->ha, dev->addr_len))
1126 lladdr = neigh->ha;
1127 } else {
1128 /* No address is supplied; if we know something,
1129 use it, otherwise discard the request.
1130 */
1131 err = -EINVAL;
1132 if (!(old & NUD_VALID))
1133 goto out;
1134 lladdr = neigh->ha;
1135 }
1136
1137 if (new & NUD_CONNECTED)
1138 neigh->confirmed = jiffies;
1139 neigh->updated = jiffies;
1140
1141	/* If the entry was valid and the address has not changed,
1142	   do not change the entry state if the new one is STALE.
1143 */
1144 err = 0;
1145 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1146 if (old & NUD_VALID) {
1147 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1148 update_isrouter = 0;
1149 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1150 (old & NUD_CONNECTED)) {
1151 lladdr = neigh->ha;
1152 new = NUD_STALE;
1153 } else
1154 goto out;
1155 } else {
1156 if (lladdr == neigh->ha && new == NUD_STALE &&
1157 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1158 (old & NUD_CONNECTED))
1159 )
1160 new = old;
1161 }
1162 }
1163
1164 if (new != old) {
1165 neigh_del_timer(neigh);
1166 if (new & NUD_IN_TIMER)
1167 neigh_add_timer(neigh, (jiffies +
1168 ((new & NUD_REACHABLE) ?
1169 neigh->parms->reachable_time :
1170 0)));
1171 neigh->nud_state = new;
1172 }
1173
1174 if (lladdr != neigh->ha) {
1175 write_seqlock(&neigh->ha_lock);
1176 memcpy(&neigh->ha, lladdr, dev->addr_len);
1177 write_sequnlock(&neigh->ha_lock);
1178 neigh_update_hhs(neigh);
1179 if (!(new & NUD_CONNECTED))
1180 neigh->confirmed = jiffies -
1181 (neigh->parms->base_reachable_time << 1);
1182 notify = 1;
1183 }
1184 if (new == old)
1185 goto out;
1186 if (new & NUD_CONNECTED)
1187 neigh_connect(neigh);
1188 else
1189 neigh_suspect(neigh);
1190 if (!(old & NUD_VALID)) {
1191 struct sk_buff *skb;
1192
1193 /* Again: avoid dead loop if something went wrong */
1194
1195 while (neigh->nud_state & NUD_VALID &&
1196 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197 struct dst_entry *dst = skb_dst(skb);
1198 struct neighbour *n2, *n1 = neigh;
1199 write_unlock_bh(&neigh->lock);
1200
1201 rcu_read_lock();
1202 /* On shaper/eql skb->dst->neighbour != neigh :( */
1203 if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
1204 n1 = n2;
1205 n1->output(n1, skb);
1206 rcu_read_unlock();
1207
1208 write_lock_bh(&neigh->lock);
1209 }
1210 skb_queue_purge(&neigh->arp_queue);
1211 neigh->arp_queue_len_bytes = 0;
1212 }
1213out:
1214 if (update_isrouter) {
1215 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1216 (neigh->flags | NTF_ROUTER) :
1217 (neigh->flags & ~NTF_ROUTER);
1218 }
1219 write_unlock_bh(&neigh->lock);
1220
1221 if (notify)
1222 neigh_update_notify(neigh);
1223
1224 return err;
1225}
1226EXPORT_SYMBOL(neigh_update);
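
/*
 * Usage sketch (illustrative only, not built): an administrative change such
 * as "ip neigh replace ... lladdr ... nud permanent" boils down to a call
 * like this; as noted above, the caller must hold a reference on the entry.
 */
#if 0
static int example_admin_replace(struct neighbour *neigh, const u8 *lladdr)
{
	return neigh_update(neigh, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}
#endif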
1227
1228struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1229 u8 *lladdr, void *saddr,
1230 struct net_device *dev)
1231{
1232 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1233 lladdr || !dev->addr_len);
1234 if (neigh)
1235 neigh_update(neigh, lladdr, NUD_STALE,
1236 NEIGH_UPDATE_F_OVERRIDE);
1237 return neigh;
1238}
1239EXPORT_SYMBOL(neigh_event_ns);
1240
1241/* called with read_lock_bh(&n->lock); */
1242static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1243{
1244 struct net_device *dev = dst->dev;
1245 __be16 prot = dst->ops->protocol;
1246 struct hh_cache *hh = &n->hh;
1247
1248 write_lock_bh(&n->lock);
1249
1250 /* Only one thread can come in here and initialize the
1251 * hh_cache entry.
1252 */
1253 if (!hh->hh_len)
1254 dev->header_ops->cache(n, hh, prot);
1255
1256 write_unlock_bh(&n->lock);
1257}
1258
1259/* This function can be used in contexts where only the old dev_queue_xmit
1260 * worked, e.g. if you want to override the normal output path (eql, shaper)
1261 * but resolution has not been done yet.
1262 */
1263
1264int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1265{
1266 struct net_device *dev = skb->dev;
1267
1268 __skb_pull(skb, skb_network_offset(skb));
1269
1270 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1271 skb->len) < 0 &&
1272 dev->header_ops->rebuild(skb))
1273 return 0;
1274
1275 return dev_queue_xmit(skb);
1276}
1277EXPORT_SYMBOL(neigh_compat_output);
1278
1279/* Slow and careful. */
1280
1281int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1282{
1283 struct dst_entry *dst = skb_dst(skb);
1284 int rc = 0;
1285
1286 if (!dst)
1287 goto discard;
1288
1289 __skb_pull(skb, skb_network_offset(skb));
1290
1291 if (!neigh_event_send(neigh, skb)) {
1292 int err;
1293 struct net_device *dev = neigh->dev;
1294 unsigned int seq;
1295
1296 if (dev->header_ops->cache && !neigh->hh.hh_len)
1297 neigh_hh_init(neigh, dst);
1298
1299 do {
1300 seq = read_seqbegin(&neigh->ha_lock);
1301 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1302 neigh->ha, NULL, skb->len);
1303 } while (read_seqretry(&neigh->ha_lock, seq));
1304
1305 if (err >= 0)
1306 rc = dev_queue_xmit(skb);
1307 else
1308 goto out_kfree_skb;
1309 }
1310out:
1311 return rc;
1312discard:
1313 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1314 dst, neigh);
1315out_kfree_skb:
1316 rc = -EINVAL;
1317 kfree_skb(skb);
1318 goto out;
1319}
1320EXPORT_SYMBOL(neigh_resolve_output);
1321
1322/* As fast as possible without hh cache */
1323
1324int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1325{
1326 struct net_device *dev = neigh->dev;
1327 unsigned int seq;
1328 int err;
1329
1330 __skb_pull(skb, skb_network_offset(skb));
1331
1332 do {
1333 seq = read_seqbegin(&neigh->ha_lock);
1334 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1335 neigh->ha, NULL, skb->len);
1336 } while (read_seqretry(&neigh->ha_lock, seq));
1337
1338 if (err >= 0)
1339 err = dev_queue_xmit(skb);
1340 else {
1341 err = -EINVAL;
1342 kfree_skb(skb);
1343 }
1344 return err;
1345}
1346EXPORT_SYMBOL(neigh_connected_output);
1347
1348int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1349{
1350 return dev_queue_xmit(skb);
1351}
1352EXPORT_SYMBOL(neigh_direct_output);
1353
1354static void neigh_proxy_process(unsigned long arg)
1355{
1356 struct neigh_table *tbl = (struct neigh_table *)arg;
1357 long sched_next = 0;
1358 unsigned long now = jiffies;
1359 struct sk_buff *skb, *n;
1360
1361 spin_lock(&tbl->proxy_queue.lock);
1362
1363 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1364 long tdif = NEIGH_CB(skb)->sched_next - now;
1365
1366 if (tdif <= 0) {
1367 struct net_device *dev = skb->dev;
1368
1369 __skb_unlink(skb, &tbl->proxy_queue);
1370 if (tbl->proxy_redo && netif_running(dev)) {
1371 rcu_read_lock();
1372 tbl->proxy_redo(skb);
1373 rcu_read_unlock();
1374 } else {
1375 kfree_skb(skb);
1376 }
1377
1378 dev_put(dev);
1379 } else if (!sched_next || tdif < sched_next)
1380 sched_next = tdif;
1381 }
1382 del_timer(&tbl->proxy_timer);
1383 if (sched_next)
1384 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1385 spin_unlock(&tbl->proxy_queue.lock);
1386}
1387
1388void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1389 struct sk_buff *skb)
1390{
1391 unsigned long now = jiffies;
1392 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1393
1394 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1395 kfree_skb(skb);
1396 return;
1397 }
1398
1399 NEIGH_CB(skb)->sched_next = sched_next;
1400 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1401
1402 spin_lock(&tbl->proxy_queue.lock);
1403 if (del_timer(&tbl->proxy_timer)) {
1404 if (time_before(tbl->proxy_timer.expires, sched_next))
1405 sched_next = tbl->proxy_timer.expires;
1406 }
1407 skb_dst_drop(skb);
1408 dev_hold(skb->dev);
1409 __skb_queue_tail(&tbl->proxy_queue, skb);
1410 mod_timer(&tbl->proxy_timer, sched_next);
1411 spin_unlock(&tbl->proxy_queue.lock);
1412}
1413EXPORT_SYMBOL(pneigh_enqueue);
1414
1415static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1416 struct net *net, int ifindex)
1417{
1418 struct neigh_parms *p;
1419
1420 for (p = &tbl->parms; p; p = p->next) {
1421 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1422 (!p->dev && !ifindex))
1423 return p;
1424 }
1425
1426 return NULL;
1427}
1428
1429struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1430 struct neigh_table *tbl)
1431{
1432 struct neigh_parms *p, *ref;
1433 struct net *net = dev_net(dev);
1434 const struct net_device_ops *ops = dev->netdev_ops;
1435
1436 ref = lookup_neigh_parms(tbl, net, 0);
1437 if (!ref)
1438 return NULL;
1439
1440 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1441 if (p) {
1442 p->tbl = tbl;
1443 atomic_set(&p->refcnt, 1);
1444 p->reachable_time =
1445 neigh_rand_reach_time(p->base_reachable_time);
1446
1447 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1448 kfree(p);
1449 return NULL;
1450 }
1451
1452 dev_hold(dev);
1453 p->dev = dev;
1454 write_pnet(&p->net, hold_net(net));
1455 p->sysctl_table = NULL;
1456 write_lock_bh(&tbl->lock);
1457 p->next = tbl->parms.next;
1458 tbl->parms.next = p;
1459 write_unlock_bh(&tbl->lock);
1460 }
1461 return p;
1462}
1463EXPORT_SYMBOL(neigh_parms_alloc);
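
/*
 * Usage sketch (illustrative only, not built): protocols allocate per-device
 * parms when a device starts using a table and release them on teardown.
 * example_device_up()/example_device_down() are hypothetical; see the
 * arp_tbl handling in net/ipv4 for a real user.
 */
#if 0
static int example_device_up(struct net_device *dev, struct neigh_table *tbl)
{
	struct neigh_parms *p = neigh_parms_alloc(dev, tbl);

	if (!p)
		return -ENOBUFS;
	/* ... stash p in the protocol's per-device state ... */
	return 0;
}

static void example_device_down(struct neigh_table *tbl, struct neigh_parms *p)
{
	neigh_parms_release(tbl, p);
}
#endif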
1464
1465static void neigh_rcu_free_parms(struct rcu_head *head)
1466{
1467 struct neigh_parms *parms =
1468 container_of(head, struct neigh_parms, rcu_head);
1469
1470 neigh_parms_put(parms);
1471}
1472
1473void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1474{
1475 struct neigh_parms **p;
1476
1477 if (!parms || parms == &tbl->parms)
1478 return;
1479 write_lock_bh(&tbl->lock);
1480 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1481 if (*p == parms) {
1482 *p = parms->next;
1483 parms->dead = 1;
1484 write_unlock_bh(&tbl->lock);
1485 if (parms->dev)
1486 dev_put(parms->dev);
1487 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1488 return;
1489 }
1490 }
1491 write_unlock_bh(&tbl->lock);
1492 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1493}
1494EXPORT_SYMBOL(neigh_parms_release);
1495
1496static void neigh_parms_destroy(struct neigh_parms *parms)
1497{
1498 release_net(neigh_parms_net(parms));
1499 kfree(parms);
1500}
1501
1502static struct lock_class_key neigh_table_proxy_queue_class;
1503
1504static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1505{
1506 unsigned long now = jiffies;
1507 unsigned long phsize;
1508
1509 write_pnet(&tbl->parms.net, &init_net);
1510 atomic_set(&tbl->parms.refcnt, 1);
1511 tbl->parms.reachable_time =
1512 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1513
1514 tbl->stats = alloc_percpu(struct neigh_statistics);
1515 if (!tbl->stats)
1516 panic("cannot create neighbour cache statistics");
1517
1518#ifdef CONFIG_PROC_FS
1519 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1520 &neigh_stat_seq_fops, tbl))
1521 panic("cannot create neighbour proc dir entry");
1522#endif
1523
1524 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1525
1526 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1527 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1528
1529 if (!tbl->nht || !tbl->phash_buckets)
1530 panic("cannot allocate neighbour cache hashes");
1531
1532 rwlock_init(&tbl->lock);
1533 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1534 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1535 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1536 skb_queue_head_init_class(&tbl->proxy_queue,
1537 &neigh_table_proxy_queue_class);
1538
1539 tbl->last_flush = now;
1540 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1541}
1542
1543void neigh_table_init(struct neigh_table *tbl)
1544{
1545 struct neigh_table *tmp;
1546
1547 neigh_table_init_no_netlink(tbl);
1548 write_lock(&neigh_tbl_lock);
1549 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1550 if (tmp->family == tbl->family)
1551 break;
1552 }
1553 tbl->next = neigh_tables;
1554 neigh_tables = tbl;
1555 write_unlock(&neigh_tbl_lock);
1556
1557 if (unlikely(tmp)) {
1558 pr_err("Registering multiple tables for family %d\n",
1559 tbl->family);
1560 dump_stack();
1561 }
1562}
1563EXPORT_SYMBOL(neigh_table_init);
1564
1565int neigh_table_clear(struct neigh_table *tbl)
1566{
1567 struct neigh_table **tp;
1568
1569 /* It is not clean... Fix it to unload IPv6 module safely */
1570 cancel_delayed_work_sync(&tbl->gc_work);
1571 del_timer_sync(&tbl->proxy_timer);
1572 pneigh_queue_purge(&tbl->proxy_queue);
1573 neigh_ifdown(tbl, NULL);
1574 if (atomic_read(&tbl->entries))
1575 pr_crit("neighbour leakage\n");
1576 write_lock(&neigh_tbl_lock);
1577 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1578 if (*tp == tbl) {
1579 *tp = tbl->next;
1580 break;
1581 }
1582 }
1583 write_unlock(&neigh_tbl_lock);
1584
1585 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1586 neigh_hash_free_rcu);
1587 tbl->nht = NULL;
1588
1589 kfree(tbl->phash_buckets);
1590 tbl->phash_buckets = NULL;
1591
1592 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1593
1594 free_percpu(tbl->stats);
1595 tbl->stats = NULL;
1596
1597 return 0;
1598}
1599EXPORT_SYMBOL(neigh_table_clear);
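
/*
 * Usage sketch (illustrative only, not built): a protocol registers its
 * table once at init time and tears it down on unload.  example_tbl is
 * hypothetical; arp_tbl and nd_tbl are the in-tree users.
 */
#if 0
static struct neigh_table example_tbl;	/* .family, .key_len, .hash, ... set up elsewhere */

static int __init example_proto_init(void)
{
	neigh_table_init(&example_tbl);
	return 0;
}

static void __exit example_proto_exit(void)
{
	neigh_table_clear(&example_tbl);
}
#endif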
1600
1601static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1602{
1603 struct net *net = sock_net(skb->sk);
1604 struct ndmsg *ndm;
1605 struct nlattr *dst_attr;
1606 struct neigh_table *tbl;
1607 struct net_device *dev = NULL;
1608 int err = -EINVAL;
1609
1610 ASSERT_RTNL();
1611 if (nlmsg_len(nlh) < sizeof(*ndm))
1612 goto out;
1613
1614 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1615 if (dst_attr == NULL)
1616 goto out;
1617
1618 ndm = nlmsg_data(nlh);
1619 if (ndm->ndm_ifindex) {
1620 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1621 if (dev == NULL) {
1622 err = -ENODEV;
1623 goto out;
1624 }
1625 }
1626
1627 read_lock(&neigh_tbl_lock);
1628 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1629 struct neighbour *neigh;
1630
1631 if (tbl->family != ndm->ndm_family)
1632 continue;
1633 read_unlock(&neigh_tbl_lock);
1634
1635 if (nla_len(dst_attr) < tbl->key_len)
1636 goto out;
1637
1638 if (ndm->ndm_flags & NTF_PROXY) {
1639 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1640 goto out;
1641 }
1642
1643 if (dev == NULL)
1644 goto out;
1645
1646 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1647 if (neigh == NULL) {
1648 err = -ENOENT;
1649 goto out;
1650 }
1651
1652 err = neigh_update(neigh, NULL, NUD_FAILED,
1653 NEIGH_UPDATE_F_OVERRIDE |
1654 NEIGH_UPDATE_F_ADMIN);
1655 neigh_release(neigh);
1656 goto out;
1657 }
1658 read_unlock(&neigh_tbl_lock);
1659 err = -EAFNOSUPPORT;
1660
1661out:
1662 return err;
1663}
1664
1665static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1666{
1667 struct net *net = sock_net(skb->sk);
1668 struct ndmsg *ndm;
1669 struct nlattr *tb[NDA_MAX+1];
1670 struct neigh_table *tbl;
1671 struct net_device *dev = NULL;
1672 int err;
1673
1674 ASSERT_RTNL();
1675 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1676 if (err < 0)
1677 goto out;
1678
1679 err = -EINVAL;
1680 if (tb[NDA_DST] == NULL)
1681 goto out;
1682
1683 ndm = nlmsg_data(nlh);
1684 if (ndm->ndm_ifindex) {
1685 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1686 if (dev == NULL) {
1687 err = -ENODEV;
1688 goto out;
1689 }
1690
1691 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1692 goto out;
1693 }
1694
1695 read_lock(&neigh_tbl_lock);
1696 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1697 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1698 struct neighbour *neigh;
1699 void *dst, *lladdr;
1700
1701 if (tbl->family != ndm->ndm_family)
1702 continue;
1703 read_unlock(&neigh_tbl_lock);
1704
1705 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1706 goto out;
1707 dst = nla_data(tb[NDA_DST]);
1708 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1709
1710 if (ndm->ndm_flags & NTF_PROXY) {
1711 struct pneigh_entry *pn;
1712
1713 err = -ENOBUFS;
1714 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1715 if (pn) {
1716 pn->flags = ndm->ndm_flags;
1717 err = 0;
1718 }
1719 goto out;
1720 }
1721
1722 if (dev == NULL)
1723 goto out;
1724
1725 neigh = neigh_lookup(tbl, dst, dev);
1726 if (neigh == NULL) {
1727 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1728 err = -ENOENT;
1729 goto out;
1730 }
1731
1732 neigh = __neigh_lookup_errno(tbl, dst, dev);
1733 if (IS_ERR(neigh)) {
1734 err = PTR_ERR(neigh);
1735 goto out;
1736 }
1737 } else {
1738 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1739 err = -EEXIST;
1740 neigh_release(neigh);
1741 goto out;
1742 }
1743
1744 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1745 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1746 }
1747
1748 if (ndm->ndm_flags & NTF_USE) {
1749 neigh_event_send(neigh, NULL);
1750 err = 0;
1751 } else
1752 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1753 neigh_release(neigh);
1754 goto out;
1755 }
1756
1757 read_unlock(&neigh_tbl_lock);
1758 err = -EAFNOSUPPORT;
1759out:
1760 return err;
1761}
1762
1763static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1764{
1765 struct nlattr *nest;
1766
1767 nest = nla_nest_start(skb, NDTA_PARMS);
1768 if (nest == NULL)
1769 return -ENOBUFS;
1770
1771 if ((parms->dev &&
1772 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1773 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1774 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1775	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1776 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1777 DIV_ROUND_UP(parms->queue_len_bytes,
1778 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1779 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1780 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1781 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1782 nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1783 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1784 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1785 parms->base_reachable_time) ||
1786 nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1787 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1788 parms->delay_probe_time) ||
1789 nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1790 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1791 nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1792 nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1793 goto nla_put_failure;
1794 return nla_nest_end(skb, nest);
1795
1796nla_put_failure:
1797 nla_nest_cancel(skb, nest);
1798 return -EMSGSIZE;
1799}
1800
1801static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1802 u32 pid, u32 seq, int type, int flags)
1803{
1804 struct nlmsghdr *nlh;
1805 struct ndtmsg *ndtmsg;
1806
1807 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1808 if (nlh == NULL)
1809 return -EMSGSIZE;
1810
1811 ndtmsg = nlmsg_data(nlh);
1812
1813 read_lock_bh(&tbl->lock);
1814 ndtmsg->ndtm_family = tbl->family;
1815 ndtmsg->ndtm_pad1 = 0;
1816 ndtmsg->ndtm_pad2 = 0;
1817
1818 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1819 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1820 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1821 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1822 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1823 goto nla_put_failure;
1824 {
1825 unsigned long now = jiffies;
1826 unsigned int flush_delta = now - tbl->last_flush;
1827 unsigned int rand_delta = now - tbl->last_rand;
1828 struct neigh_hash_table *nht;
1829 struct ndt_config ndc = {
1830 .ndtc_key_len = tbl->key_len,
1831 .ndtc_entry_size = tbl->entry_size,
1832 .ndtc_entries = atomic_read(&tbl->entries),
1833 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1834 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1835 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1836 };
1837
1838 rcu_read_lock_bh();
1839 nht = rcu_dereference_bh(tbl->nht);
1840 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1841 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1842 rcu_read_unlock_bh();
1843
1844 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1845 goto nla_put_failure;
1846 }
1847
1848 {
1849 int cpu;
1850 struct ndt_stats ndst;
1851
1852 memset(&ndst, 0, sizeof(ndst));
1853
1854 for_each_possible_cpu(cpu) {
1855 struct neigh_statistics *st;
1856
1857 st = per_cpu_ptr(tbl->stats, cpu);
1858 ndst.ndts_allocs += st->allocs;
1859 ndst.ndts_destroys += st->destroys;
1860 ndst.ndts_hash_grows += st->hash_grows;
1861 ndst.ndts_res_failed += st->res_failed;
1862 ndst.ndts_lookups += st->lookups;
1863 ndst.ndts_hits += st->hits;
1864 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1865 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1866 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1867 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1868 }
1869
1870 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1871 goto nla_put_failure;
1872 }
1873
1874 BUG_ON(tbl->parms.dev);
1875 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1876 goto nla_put_failure;
1877
1878 read_unlock_bh(&tbl->lock);
1879 return nlmsg_end(skb, nlh);
1880
1881nla_put_failure:
1882 read_unlock_bh(&tbl->lock);
1883 nlmsg_cancel(skb, nlh);
1884 return -EMSGSIZE;
1885}
1886
1887static int neightbl_fill_param_info(struct sk_buff *skb,
1888 struct neigh_table *tbl,
1889 struct neigh_parms *parms,
1890 u32 pid, u32 seq, int type,
1891 unsigned int flags)
1892{
1893 struct ndtmsg *ndtmsg;
1894 struct nlmsghdr *nlh;
1895
1896 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1897 if (nlh == NULL)
1898 return -EMSGSIZE;
1899
1900 ndtmsg = nlmsg_data(nlh);
1901
1902 read_lock_bh(&tbl->lock);
1903 ndtmsg->ndtm_family = tbl->family;
1904 ndtmsg->ndtm_pad1 = 0;
1905 ndtmsg->ndtm_pad2 = 0;
1906
1907 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1908 neightbl_fill_parms(skb, parms) < 0)
1909 goto errout;
1910
1911 read_unlock_bh(&tbl->lock);
1912 return nlmsg_end(skb, nlh);
1913errout:
1914 read_unlock_bh(&tbl->lock);
1915 nlmsg_cancel(skb, nlh);
1916 return -EMSGSIZE;
1917}
1918
1919static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1920 [NDTA_NAME] = { .type = NLA_STRING },
1921 [NDTA_THRESH1] = { .type = NLA_U32 },
1922 [NDTA_THRESH2] = { .type = NLA_U32 },
1923 [NDTA_THRESH3] = { .type = NLA_U32 },
1924 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1925 [NDTA_PARMS] = { .type = NLA_NESTED },
1926};
1927
1928static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1929 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1930 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1931 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1932 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1933 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1934 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1935 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1936 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1937 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1938 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1939 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1940 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1941 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1942};
1943
1944static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1945{
1946 struct net *net = sock_net(skb->sk);
1947 struct neigh_table *tbl;
1948 struct ndtmsg *ndtmsg;
1949 struct nlattr *tb[NDTA_MAX+1];
1950 int err;
1951
1952 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1953 nl_neightbl_policy);
1954 if (err < 0)
1955 goto errout;
1956
1957 if (tb[NDTA_NAME] == NULL) {
1958 err = -EINVAL;
1959 goto errout;
1960 }
1961
1962 ndtmsg = nlmsg_data(nlh);
1963 read_lock(&neigh_tbl_lock);
1964 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1965 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1966 continue;
1967
1968 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1969 break;
1970 }
1971
1972 if (tbl == NULL) {
1973 err = -ENOENT;
1974 goto errout_locked;
1975 }
1976
1977 /*
1978 * We acquire tbl->lock to be nice to the periodic timers and
1979 * make sure they always see a consistent set of values.
1980 */
1981 write_lock_bh(&tbl->lock);
1982
1983 if (tb[NDTA_PARMS]) {
1984 struct nlattr *tbp[NDTPA_MAX+1];
1985 struct neigh_parms *p;
1986 int i, ifindex = 0;
1987
1988 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1989 nl_ntbl_parm_policy);
1990 if (err < 0)
1991 goto errout_tbl_lock;
1992
1993 if (tbp[NDTPA_IFINDEX])
1994 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1995
1996 p = lookup_neigh_parms(tbl, net, ifindex);
1997 if (p == NULL) {
1998 err = -ENOENT;
1999 goto errout_tbl_lock;
2000 }
2001
2002 for (i = 1; i <= NDTPA_MAX; i++) {
2003 if (tbp[i] == NULL)
2004 continue;
2005
2006 switch (i) {
2007 case NDTPA_QUEUE_LEN:
2008 p->queue_len_bytes = nla_get_u32(tbp[i]) *
2009 SKB_TRUESIZE(ETH_FRAME_LEN);
2010 break;
2011 case NDTPA_QUEUE_LENBYTES:
2012 p->queue_len_bytes = nla_get_u32(tbp[i]);
2013 break;
2014 case NDTPA_PROXY_QLEN:
2015 p->proxy_qlen = nla_get_u32(tbp[i]);
2016 break;
2017 case NDTPA_APP_PROBES:
2018 p->app_probes = nla_get_u32(tbp[i]);
2019 break;
2020 case NDTPA_UCAST_PROBES:
2021 p->ucast_probes = nla_get_u32(tbp[i]);
2022 break;
2023 case NDTPA_MCAST_PROBES:
2024 p->mcast_probes = nla_get_u32(tbp[i]);
2025 break;
2026 case NDTPA_BASE_REACHABLE_TIME:
2027 p->base_reachable_time = nla_get_msecs(tbp[i]);
2028 break;
2029 case NDTPA_GC_STALETIME:
2030 p->gc_staletime = nla_get_msecs(tbp[i]);
2031 break;
2032 case NDTPA_DELAY_PROBE_TIME:
2033 p->delay_probe_time = nla_get_msecs(tbp[i]);
2034 break;
2035 case NDTPA_RETRANS_TIME:
2036 p->retrans_time = nla_get_msecs(tbp[i]);
2037 break;
2038 case NDTPA_ANYCAST_DELAY:
2039 p->anycast_delay = nla_get_msecs(tbp[i]);
2040 break;
2041 case NDTPA_PROXY_DELAY:
2042 p->proxy_delay = nla_get_msecs(tbp[i]);
2043 break;
2044 case NDTPA_LOCKTIME:
2045 p->locktime = nla_get_msecs(tbp[i]);
2046 break;
2047 }
2048 }
2049 }
2050
2051 if (tb[NDTA_THRESH1])
2052 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2053
2054 if (tb[NDTA_THRESH2])
2055 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2056
2057 if (tb[NDTA_THRESH3])
2058 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2059
2060 if (tb[NDTA_GC_INTERVAL])
2061 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2062
2063 err = 0;
2064
2065errout_tbl_lock:
2066 write_unlock_bh(&tbl->lock);
2067errout_locked:
2068 read_unlock(&neigh_tbl_lock);
2069errout:
2070 return err;
2071}
2072
2073static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2074{
2075 struct net *net = sock_net(skb->sk);
2076 int family, tidx, nidx = 0;
2077 int tbl_skip = cb->args[0];
2078 int neigh_skip = cb->args[1];
2079 struct neigh_table *tbl;
2080
2081 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2082
2083 read_lock(&neigh_tbl_lock);
2084 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2085 struct neigh_parms *p;
2086
2087 if (tidx < tbl_skip || (family && tbl->family != family))
2088 continue;
2089
2090 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2091 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2092 NLM_F_MULTI) <= 0)
2093 break;
2094
2095 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2096 if (!net_eq(neigh_parms_net(p), net))
2097 continue;
2098
2099 if (nidx < neigh_skip)
2100 goto next;
2101
2102 if (neightbl_fill_param_info(skb, tbl, p,
2103 NETLINK_CB(cb->skb).pid,
2104 cb->nlh->nlmsg_seq,
2105 RTM_NEWNEIGHTBL,
2106 NLM_F_MULTI) <= 0)
2107 goto out;
2108 next:
2109 nidx++;
2110 }
2111
2112 neigh_skip = 0;
2113 }
2114out:
2115 read_unlock(&neigh_tbl_lock);
2116 cb->args[0] = tidx;
2117 cb->args[1] = nidx;
2118
2119 return skb->len;
2120}
2121
2122static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2123 u32 pid, u32 seq, int type, unsigned int flags)
2124{
2125 unsigned long now = jiffies;
2126 struct nda_cacheinfo ci;
2127 struct nlmsghdr *nlh;
2128 struct ndmsg *ndm;
2129
2130 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2131 if (nlh == NULL)
2132 return -EMSGSIZE;
2133
2134 ndm = nlmsg_data(nlh);
2135 ndm->ndm_family = neigh->ops->family;
2136 ndm->ndm_pad1 = 0;
2137 ndm->ndm_pad2 = 0;
2138 ndm->ndm_flags = neigh->flags;
2139 ndm->ndm_type = neigh->type;
2140 ndm->ndm_ifindex = neigh->dev->ifindex;
2141
2142 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2143 goto nla_put_failure;
2144
2145 read_lock_bh(&neigh->lock);
2146 ndm->ndm_state = neigh->nud_state;
2147 if (neigh->nud_state & NUD_VALID) {
2148 char haddr[MAX_ADDR_LEN];
2149
2150 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2151 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2152 read_unlock_bh(&neigh->lock);
2153 goto nla_put_failure;
2154 }
2155 }
2156
2157 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2158 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2159 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2160 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2161 read_unlock_bh(&neigh->lock);
2162
2163 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2164 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2165 goto nla_put_failure;
2166
2167 return nlmsg_end(skb, nlh);
2168
2169nla_put_failure:
2170 nlmsg_cancel(skb, nlh);
2171 return -EMSGSIZE;
2172}
2173
2174static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2175 u32 pid, u32 seq, int type, unsigned int flags,
2176 struct neigh_table *tbl)
2177{
2178 struct nlmsghdr *nlh;
2179 struct ndmsg *ndm;
2180
2181 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2182 if (nlh == NULL)
2183 return -EMSGSIZE;
2184
2185 ndm = nlmsg_data(nlh);
2186 ndm->ndm_family = tbl->family;
2187 ndm->ndm_pad1 = 0;
2188 ndm->ndm_pad2 = 0;
2189 ndm->ndm_flags = pn->flags | NTF_PROXY;
2190 ndm->ndm_type = NDA_DST;
2191 ndm->ndm_ifindex = pn->dev->ifindex;
2192 ndm->ndm_state = NUD_NONE;
2193
2194 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2195 goto nla_put_failure;
2196
2197 return nlmsg_end(skb, nlh);
2198
2199nla_put_failure:
2200 nlmsg_cancel(skb, nlh);
2201 return -EMSGSIZE;
2202}
2203
2204static void neigh_update_notify(struct neighbour *neigh)
2205{
2206 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2207 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2208}
2209
2210static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2211 struct netlink_callback *cb)
2212{
2213 struct net *net = sock_net(skb->sk);
2214 struct neighbour *n;
2215 int rc, h, s_h = cb->args[1];
2216 int idx, s_idx = idx = cb->args[2];
2217 struct neigh_hash_table *nht;
2218
2219 rcu_read_lock_bh();
2220 nht = rcu_dereference_bh(tbl->nht);
2221
2222 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2223 if (h > s_h)
2224 s_idx = 0;
2225 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2226 n != NULL;
2227 n = rcu_dereference_bh(n->next)) {
2228 if (!net_eq(dev_net(n->dev), net))
2229 continue;
2230 if (idx < s_idx)
2231 goto next;
2232 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2233 cb->nlh->nlmsg_seq,
2234 RTM_NEWNEIGH,
2235 NLM_F_MULTI) <= 0) {
2236 rc = -1;
2237 goto out;
2238 }
2239next:
2240 idx++;
2241 }
2242 }
2243 rc = skb->len;
2244out:
2245 rcu_read_unlock_bh();
2246 cb->args[1] = h;
2247 cb->args[2] = idx;
2248 return rc;
2249}
2250
2251static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2252 struct netlink_callback *cb)
2253{
2254 struct pneigh_entry *n;
2255 struct net *net = sock_net(skb->sk);
2256 int rc, h, s_h = cb->args[3];
2257 int idx, s_idx = idx = cb->args[4];
2258
2259 read_lock_bh(&tbl->lock);
2260
2261 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2262 if (h > s_h)
2263 s_idx = 0;
2264 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2265 if (dev_net(n->dev) != net)
2266 continue;
2267 if (idx < s_idx)
2268 goto next;
2269 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2270 cb->nlh->nlmsg_seq,
2271 RTM_NEWNEIGH,
2272 NLM_F_MULTI, tbl) <= 0) {
2273 read_unlock_bh(&tbl->lock);
2274 rc = -1;
2275 goto out;
2276 }
2277 next:
2278 idx++;
2279 }
2280 }
2281
2282 read_unlock_bh(&tbl->lock);
2283 rc = skb->len;
2284out:
2285 cb->args[3] = h;
2286 cb->args[4] = idx;
2287 return rc;
2288
2289}
2290
2291static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2292{
2293 struct neigh_table *tbl;
2294 int t, family, s_t;
2295 int proxy = 0;
2296 int err;
2297
2298 read_lock(&neigh_tbl_lock);
2299 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2300
2301	/* check for the presence of a full ndmsg structure; the family
2302	 * member is at the same offset in both rtgenmsg and ndmsg
2303	 */
2304 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2305 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2306 proxy = 1;
2307
2308 s_t = cb->args[0];
2309
2310 for (tbl = neigh_tables, t = 0; tbl;
2311 tbl = tbl->next, t++) {
2312 if (t < s_t || (family && tbl->family != family))
2313 continue;
2314 if (t > s_t)
2315 memset(&cb->args[1], 0, sizeof(cb->args) -
2316 sizeof(cb->args[0]));
2317 if (proxy)
2318 err = pneigh_dump_table(tbl, skb, cb);
2319 else
2320 err = neigh_dump_table(tbl, skb, cb);
2321 if (err < 0)
2322 break;
2323 }
2324 read_unlock(&neigh_tbl_lock);
2325
2326 cb->args[0] = t;
2327 return skb->len;
2328}
2329
2330void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2331{
2332 int chain;
2333 struct neigh_hash_table *nht;
2334
2335 rcu_read_lock_bh();
2336 nht = rcu_dereference_bh(tbl->nht);
2337
2338 read_lock(&tbl->lock); /* avoid resizes */
2339 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2340 struct neighbour *n;
2341
2342 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2343 n != NULL;
2344 n = rcu_dereference_bh(n->next))
2345 cb(n, cookie);
2346 }
2347 read_unlock(&tbl->lock);
2348 rcu_read_unlock_bh();
2349}
2350EXPORT_SYMBOL(neigh_for_each);
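/* Usage sketch (illustrative only, not part of this file): a protocol can
 * walk every entry of a table with a callback and an opaque cookie.  The
 * helper below is hypothetical; only neigh_for_each() and arp_tbl are real.
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int count = 0;
 *	neigh_for_each(&arp_tbl, count_entry, &count);
 *
 * The callback runs under tbl->lock and rcu_read_lock_bh(), so it must not
 * sleep and should not take locks that nest badly with the table lock.
 */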
2351
2352/* The tbl->lock must be held as a writer and BH disabled. */
2353void __neigh_for_each_release(struct neigh_table *tbl,
2354 int (*cb)(struct neighbour *))
2355{
2356 int chain;
2357 struct neigh_hash_table *nht;
2358
2359 nht = rcu_dereference_protected(tbl->nht,
2360 lockdep_is_held(&tbl->lock));
2361 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2362 struct neighbour *n;
2363 struct neighbour __rcu **np;
2364
2365 np = &nht->hash_buckets[chain];
2366 while ((n = rcu_dereference_protected(*np,
2367 lockdep_is_held(&tbl->lock))) != NULL) {
2368 int release;
2369
2370 write_lock(&n->lock);
2371 release = cb(n);
2372 if (release) {
2373 rcu_assign_pointer(*np,
2374 rcu_dereference_protected(n->next,
2375 lockdep_is_held(&tbl->lock)));
2376 n->dead = 1;
2377 } else
2378 np = &n->next;
2379 write_unlock(&n->lock);
2380 if (release)
2381 neigh_cleanup_and_release(n);
2382 }
2383 }
2384}
2385EXPORT_SYMBOL(__neigh_for_each_release);
2386
2387#ifdef CONFIG_PROC_FS
2388
2389static struct neighbour *neigh_get_first(struct seq_file *seq)
2390{
2391 struct neigh_seq_state *state = seq->private;
2392 struct net *net = seq_file_net(seq);
2393 struct neigh_hash_table *nht = state->nht;
2394 struct neighbour *n = NULL;
2395 int bucket = state->bucket;
2396
2397 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2398 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2399 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2400
2401 while (n) {
2402 if (!net_eq(dev_net(n->dev), net))
2403 goto next;
2404 if (state->neigh_sub_iter) {
2405 loff_t fakep = 0;
2406 void *v;
2407
2408 v = state->neigh_sub_iter(state, n, &fakep);
2409 if (!v)
2410 goto next;
2411 }
2412 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2413 break;
2414 if (n->nud_state & ~NUD_NOARP)
2415 break;
2416next:
2417 n = rcu_dereference_bh(n->next);
2418 }
2419
2420 if (n)
2421 break;
2422 }
2423 state->bucket = bucket;
2424
2425 return n;
2426}
2427
2428static struct neighbour *neigh_get_next(struct seq_file *seq,
2429 struct neighbour *n,
2430 loff_t *pos)
2431{
2432 struct neigh_seq_state *state = seq->private;
2433 struct net *net = seq_file_net(seq);
2434 struct neigh_hash_table *nht = state->nht;
2435
2436 if (state->neigh_sub_iter) {
2437 void *v = state->neigh_sub_iter(state, n, pos);
2438 if (v)
2439 return n;
2440 }
2441 n = rcu_dereference_bh(n->next);
2442
2443 while (1) {
2444 while (n) {
2445 if (!net_eq(dev_net(n->dev), net))
2446 goto next;
2447 if (state->neigh_sub_iter) {
2448 void *v = state->neigh_sub_iter(state, n, pos);
2449 if (v)
2450 return n;
2451 goto next;
2452 }
2453 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2454 break;
2455
2456 if (n->nud_state & ~NUD_NOARP)
2457 break;
2458next:
2459 n = rcu_dereference_bh(n->next);
2460 }
2461
2462 if (n)
2463 break;
2464
2465 if (++state->bucket >= (1 << nht->hash_shift))
2466 break;
2467
2468 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2469 }
2470
2471 if (n && pos)
2472 --(*pos);
2473 return n;
2474}
2475
2476static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2477{
2478 struct neighbour *n = neigh_get_first(seq);
2479
2480 if (n) {
2481 --(*pos);
2482 while (*pos) {
2483 n = neigh_get_next(seq, n, pos);
2484 if (!n)
2485 break;
2486 }
2487 }
2488 return *pos ? NULL : n;
2489}
2490
2491static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2492{
2493 struct neigh_seq_state *state = seq->private;
2494 struct net *net = seq_file_net(seq);
2495 struct neigh_table *tbl = state->tbl;
2496 struct pneigh_entry *pn = NULL;
2497 int bucket = state->bucket;
2498
2499 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2500 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2501 pn = tbl->phash_buckets[bucket];
2502 while (pn && !net_eq(pneigh_net(pn), net))
2503 pn = pn->next;
2504 if (pn)
2505 break;
2506 }
2507 state->bucket = bucket;
2508
2509 return pn;
2510}
2511
2512static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2513 struct pneigh_entry *pn,
2514 loff_t *pos)
2515{
2516 struct neigh_seq_state *state = seq->private;
2517 struct net *net = seq_file_net(seq);
2518 struct neigh_table *tbl = state->tbl;
2519
2520 do {
2521 pn = pn->next;
2522 } while (pn && !net_eq(pneigh_net(pn), net));
2523
2524 while (!pn) {
2525 if (++state->bucket > PNEIGH_HASHMASK)
2526 break;
2527 pn = tbl->phash_buckets[state->bucket];
2528 while (pn && !net_eq(pneigh_net(pn), net))
2529 pn = pn->next;
2530 if (pn)
2531 break;
2532 }
2533
2534 if (pn && pos)
2535 --(*pos);
2536
2537 return pn;
2538}
2539
2540static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2541{
2542 struct pneigh_entry *pn = pneigh_get_first(seq);
2543
2544 if (pn) {
2545 --(*pos);
2546 while (*pos) {
2547 pn = pneigh_get_next(seq, pn, pos);
2548 if (!pn)
2549 break;
2550 }
2551 }
2552 return *pos ? NULL : pn;
2553}
2554
2555static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2556{
2557 struct neigh_seq_state *state = seq->private;
2558 void *rc;
2559 loff_t idxpos = *pos;
2560
2561 rc = neigh_get_idx(seq, &idxpos);
2562 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2563 rc = pneigh_get_idx(seq, &idxpos);
2564
2565 return rc;
2566}
2567
2568void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2569 __acquires(rcu_bh)
2570{
2571 struct neigh_seq_state *state = seq->private;
2572
2573 state->tbl = tbl;
2574 state->bucket = 0;
2575 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2576
2577 rcu_read_lock_bh();
2578 state->nht = rcu_dereference_bh(tbl->nht);
2579
2580 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2581}
2582EXPORT_SYMBOL(neigh_seq_start);
2583
2584void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2585{
2586 struct neigh_seq_state *state;
2587 void *rc;
2588
2589 if (v == SEQ_START_TOKEN) {
2590 rc = neigh_get_first(seq);
2591 goto out;
2592 }
2593
2594 state = seq->private;
2595 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2596 rc = neigh_get_next(seq, v, NULL);
2597 if (rc)
2598 goto out;
2599 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2600 rc = pneigh_get_first(seq);
2601 } else {
2602 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2603 rc = pneigh_get_next(seq, v, NULL);
2604 }
2605out:
2606 ++(*pos);
2607 return rc;
2608}
2609EXPORT_SYMBOL(neigh_seq_next);
2610
2611void neigh_seq_stop(struct seq_file *seq, void *v)
2612 __releases(rcu_bh)
2613{
2614 rcu_read_unlock_bh();
2615}
2616EXPORT_SYMBOL(neigh_seq_stop);
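/* Usage sketch (illustrative): a protocol's /proc seq_file handlers can
 * delegate the iteration to the generic helpers above.  The wiring below is
 * a hedged example of an ARP-style caller, not a definitive implementation:
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with neigh_seq_next() and neigh_seq_stop() plugged directly into the
 * .next and .stop members of the protocol's seq_operations.
 */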
2617
2618/* statistics via seq_file */
2619
2620static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2621{
2622 struct neigh_table *tbl = seq->private;
2623 int cpu;
2624
2625 if (*pos == 0)
2626 return SEQ_START_TOKEN;
2627
2628 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2629 if (!cpu_possible(cpu))
2630 continue;
2631 *pos = cpu+1;
2632 return per_cpu_ptr(tbl->stats, cpu);
2633 }
2634 return NULL;
2635}
2636
2637static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2638{
2639 struct neigh_table *tbl = seq->private;
2640 int cpu;
2641
2642 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2643 if (!cpu_possible(cpu))
2644 continue;
2645 *pos = cpu+1;
2646 return per_cpu_ptr(tbl->stats, cpu);
2647 }
2648 return NULL;
2649}
2650
2651static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2652{
2653
2654}
2655
2656static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2657{
2658 struct neigh_table *tbl = seq->private;
2659 struct neigh_statistics *st = v;
2660
2661 if (v == SEQ_START_TOKEN) {
2662 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2663 return 0;
2664 }
2665
2666 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2667 "%08lx %08lx %08lx %08lx %08lx\n",
2668 atomic_read(&tbl->entries),
2669
2670 st->allocs,
2671 st->destroys,
2672 st->hash_grows,
2673
2674 st->lookups,
2675 st->hits,
2676
2677 st->res_failed,
2678
2679 st->rcv_probes_mcast,
2680 st->rcv_probes_ucast,
2681
2682 st->periodic_gc_runs,
2683 st->forced_gc_runs,
2684 st->unres_discards
2685 );
2686
2687 return 0;
2688}
2689
2690static const struct seq_operations neigh_stat_seq_ops = {
2691 .start = neigh_stat_seq_start,
2692 .next = neigh_stat_seq_next,
2693 .stop = neigh_stat_seq_stop,
2694 .show = neigh_stat_seq_show,
2695};
2696
2697static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2698{
2699 int ret = seq_open(file, &neigh_stat_seq_ops);
2700
2701 if (!ret) {
2702 struct seq_file *sf = file->private_data;
2703 sf->private = PDE(inode)->data;
2704 }
2705 return ret;
2706}
2707
2708static const struct file_operations neigh_stat_seq_fops = {
2709 .owner = THIS_MODULE,
2710 .open = neigh_stat_seq_open,
2711 .read = seq_read,
2712 .llseek = seq_lseek,
2713 .release = seq_release,
2714};
2715
2716#endif /* CONFIG_PROC_FS */
2717
2718static inline size_t neigh_nlmsg_size(void)
2719{
2720 return NLMSG_ALIGN(sizeof(struct ndmsg))
2721 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2722 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2723 + nla_total_size(sizeof(struct nda_cacheinfo))
2724 + nla_total_size(4); /* NDA_PROBES */
2725}
2726
2727static void __neigh_notify(struct neighbour *n, int type, int flags)
2728{
2729 struct net *net = dev_net(n->dev);
2730 struct sk_buff *skb;
2731 int err = -ENOBUFS;
2732
2733 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2734 if (skb == NULL)
2735 goto errout;
2736
2737 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2738 if (err < 0) {
2739 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2740 WARN_ON(err == -EMSGSIZE);
2741 kfree_skb(skb);
2742 goto errout;
2743 }
2744 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2745 return;
2746errout:
2747 if (err < 0)
2748 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2749}
2750
2751#ifdef CONFIG_ARPD
2752void neigh_app_ns(struct neighbour *n)
2753{
2754 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2755}
2756EXPORT_SYMBOL(neigh_app_ns);
2757#endif /* CONFIG_ARPD */
2758
2759#ifdef CONFIG_SYSCTL
2760
2761static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2762 size_t *lenp, loff_t *ppos)
2763{
2764 int size, ret;
2765 ctl_table tmp = *ctl;
2766
2767 tmp.data = &size;
2768 size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
2769 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
2770 if (write && !ret)
2771 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2772 return ret;
2773}
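/* Worked example for the conversion above (numbers are illustrative, since
 * SKB_TRUESIZE() depends on struct layouts): writing unres_qlen = N stores
 * N * SKB_TRUESIZE(ETH_FRAME_LEN) into queue_len_bytes, and reading the
 * value back divides by the same per-packet truesize, rounding up.  The
 * legacy packet-count sysctl therefore stays consistent with the byte
 * limit that is actually enforced on the per-neighbour arp_queue.
 */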
2774
2775enum {
2776 NEIGH_VAR_MCAST_PROBE,
2777 NEIGH_VAR_UCAST_PROBE,
2778 NEIGH_VAR_APP_PROBE,
2779 NEIGH_VAR_RETRANS_TIME,
2780 NEIGH_VAR_BASE_REACHABLE_TIME,
2781 NEIGH_VAR_DELAY_PROBE_TIME,
2782 NEIGH_VAR_GC_STALETIME,
2783 NEIGH_VAR_QUEUE_LEN,
2784 NEIGH_VAR_QUEUE_LEN_BYTES,
2785 NEIGH_VAR_PROXY_QLEN,
2786 NEIGH_VAR_ANYCAST_DELAY,
2787 NEIGH_VAR_PROXY_DELAY,
2788 NEIGH_VAR_LOCKTIME,
2789 NEIGH_VAR_RETRANS_TIME_MS,
2790 NEIGH_VAR_BASE_REACHABLE_TIME_MS,
2791 NEIGH_VAR_GC_INTERVAL,
2792 NEIGH_VAR_GC_THRESH1,
2793 NEIGH_VAR_GC_THRESH2,
2794 NEIGH_VAR_GC_THRESH3,
2795 NEIGH_VAR_MAX
2796};
2797
2798static struct neigh_sysctl_table {
2799 struct ctl_table_header *sysctl_header;
2800 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2801} neigh_sysctl_template __read_mostly = {
2802 .neigh_vars = {
2803 [NEIGH_VAR_MCAST_PROBE] = {
2804 .procname = "mcast_solicit",
2805 .maxlen = sizeof(int),
2806 .mode = 0644,
2807 .proc_handler = proc_dointvec,
2808 },
2809 [NEIGH_VAR_UCAST_PROBE] = {
2810 .procname = "ucast_solicit",
2811 .maxlen = sizeof(int),
2812 .mode = 0644,
2813 .proc_handler = proc_dointvec,
2814 },
2815 [NEIGH_VAR_APP_PROBE] = {
2816 .procname = "app_solicit",
2817 .maxlen = sizeof(int),
2818 .mode = 0644,
2819 .proc_handler = proc_dointvec,
2820 },
2821 [NEIGH_VAR_RETRANS_TIME] = {
2822 .procname = "retrans_time",
2823 .maxlen = sizeof(int),
2824 .mode = 0644,
2825 .proc_handler = proc_dointvec_userhz_jiffies,
2826 },
2827 [NEIGH_VAR_BASE_REACHABLE_TIME] = {
2828 .procname = "base_reachable_time",
2829 .maxlen = sizeof(int),
2830 .mode = 0644,
2831 .proc_handler = proc_dointvec_jiffies,
2832 },
2833 [NEIGH_VAR_DELAY_PROBE_TIME] = {
2834 .procname = "delay_first_probe_time",
2835 .maxlen = sizeof(int),
2836 .mode = 0644,
2837 .proc_handler = proc_dointvec_jiffies,
2838 },
2839 [NEIGH_VAR_GC_STALETIME] = {
2840 .procname = "gc_stale_time",
2841 .maxlen = sizeof(int),
2842 .mode = 0644,
2843 .proc_handler = proc_dointvec_jiffies,
2844 },
2845 [NEIGH_VAR_QUEUE_LEN] = {
2846 .procname = "unres_qlen",
2847 .maxlen = sizeof(int),
2848 .mode = 0644,
2849 .proc_handler = proc_unres_qlen,
2850 },
2851 [NEIGH_VAR_QUEUE_LEN_BYTES] = {
2852 .procname = "unres_qlen_bytes",
2853 .maxlen = sizeof(int),
2854 .mode = 0644,
2855 .proc_handler = proc_dointvec,
2856 },
2857 [NEIGH_VAR_PROXY_QLEN] = {
2858 .procname = "proxy_qlen",
2859 .maxlen = sizeof(int),
2860 .mode = 0644,
2861 .proc_handler = proc_dointvec,
2862 },
2863 [NEIGH_VAR_ANYCAST_DELAY] = {
2864 .procname = "anycast_delay",
2865 .maxlen = sizeof(int),
2866 .mode = 0644,
2867 .proc_handler = proc_dointvec_userhz_jiffies,
2868 },
2869 [NEIGH_VAR_PROXY_DELAY] = {
2870 .procname = "proxy_delay",
2871 .maxlen = sizeof(int),
2872 .mode = 0644,
2873 .proc_handler = proc_dointvec_userhz_jiffies,
2874 },
2875 [NEIGH_VAR_LOCKTIME] = {
2876 .procname = "locktime",
2877 .maxlen = sizeof(int),
2878 .mode = 0644,
2879 .proc_handler = proc_dointvec_userhz_jiffies,
2880 },
2881 [NEIGH_VAR_RETRANS_TIME_MS] = {
2882 .procname = "retrans_time_ms",
2883 .maxlen = sizeof(int),
2884 .mode = 0644,
2885 .proc_handler = proc_dointvec_ms_jiffies,
2886 },
2887 [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
2888 .procname = "base_reachable_time_ms",
2889 .maxlen = sizeof(int),
2890 .mode = 0644,
2891 .proc_handler = proc_dointvec_ms_jiffies,
2892 },
2893 [NEIGH_VAR_GC_INTERVAL] = {
2894 .procname = "gc_interval",
2895 .maxlen = sizeof(int),
2896 .mode = 0644,
2897 .proc_handler = proc_dointvec_jiffies,
2898 },
2899 [NEIGH_VAR_GC_THRESH1] = {
2900 .procname = "gc_thresh1",
2901 .maxlen = sizeof(int),
2902 .mode = 0644,
2903 .proc_handler = proc_dointvec,
2904 },
2905 [NEIGH_VAR_GC_THRESH2] = {
2906 .procname = "gc_thresh2",
2907 .maxlen = sizeof(int),
2908 .mode = 0644,
2909 .proc_handler = proc_dointvec,
2910 },
2911 [NEIGH_VAR_GC_THRESH3] = {
2912 .procname = "gc_thresh3",
2913 .maxlen = sizeof(int),
2914 .mode = 0644,
2915 .proc_handler = proc_dointvec,
2916 },
2917 {},
2918 },
2919};
2920
2921int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2922 char *p_name, proc_handler *handler)
2923{
2924 struct neigh_sysctl_table *t;
2925 const char *dev_name_source = NULL;
2926 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2927
2928 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2929 if (!t)
2930 goto err;
2931
2932 t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
2933 t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
2934 t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
2935 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
2936 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
2937 t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
2938 t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
2939 t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
2940 t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
2941 t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
2942 t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
2943 t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
2944 t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
2945 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
2946 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
2947
2948 if (dev) {
2949 dev_name_source = dev->name;
2950 /* Terminate the table early */
2951 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2952 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2953 } else {
2954 dev_name_source = "default";
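		/* The four table-wide gc_* knobs live immediately after the
		 * default parms in struct neigh_table, so with p == &tbl->parms
		 * the casts below index gc_interval and gc_thresh1..3 in order.
		 */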
2955 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2956 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2957 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
2958 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
2959 }
2960
2961
2962 if (handler) {
2963 /* RetransTime */
2964 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
2965 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
2966 /* ReachableTime */
2967 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
2968 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
2969 /* RetransTime (in milliseconds)*/
2970 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
2971 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
2972 /* ReachableTime (in milliseconds) */
2973 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
2974 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2975 }
2976
2977 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2978 p_name, dev_name_source);
2979 t->sysctl_header =
2980 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
2981 if (!t->sysctl_header)
2982 goto free;
2983
2984 p->sysctl_table = t;
2985 return 0;
2986
2987free:
2988 kfree(t);
2989err:
2990 return -ENOBUFS;
2991}
2992EXPORT_SYMBOL(neigh_sysctl_register);
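/* Usage sketch (illustrative): a protocol typically registers the default
 * parameter set at init time and per-device sets as devices appear, e.g.
 * (hedged example, handler argument shown as NULL for the simple case):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
 *
 * which creates knobs under net/ipv4/neigh/default/, while passing a real
 * net_device instead of NULL creates net/ipv4/neigh/<ifname>/ without the
 * table-wide gc_* entries.
 */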
2993
2994void neigh_sysctl_unregister(struct neigh_parms *p)
2995{
2996 if (p->sysctl_table) {
2997 struct neigh_sysctl_table *t = p->sysctl_table;
2998 p->sysctl_table = NULL;
2999 unregister_net_sysctl_table(t->sysctl_header);
3000 kfree(t);
3001 }
3002}
3003EXPORT_SYMBOL(neigh_sysctl_unregister);
3004
3005#endif /* CONFIG_SYSCTL */
3006
3007static int __init neigh_init(void)
3008{
3009 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3010 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3011 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3012
3013 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3014 NULL);
3015 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3016
3017 return 0;
3018}
3019
3020subsys_initcall(neigh_init);
3021
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Generic address resolution entity
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * Fixes:
10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
11 * Harald Welte Add neighbour cache statistics like rtstat
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/slab.h>
17#include <linux/kmemleak.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/socket.h>
22#include <linux/netdevice.h>
23#include <linux/proc_fs.h>
24#ifdef CONFIG_SYSCTL
25#include <linux/sysctl.h>
26#endif
27#include <linux/times.h>
28#include <net/net_namespace.h>
29#include <net/neighbour.h>
30#include <net/arp.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/netevent.h>
34#include <net/netlink.h>
35#include <linux/rtnetlink.h>
36#include <linux/random.h>
37#include <linux/string.h>
38#include <linux/log2.h>
39#include <linux/inetdevice.h>
40#include <net/addrconf.h>
41
42#include <trace/events/neigh.h>
43
44#define DEBUG
45#define NEIGH_DEBUG 1
46#define neigh_dbg(level, fmt, ...) \
47do { \
48 if (level <= NEIGH_DEBUG) \
49 pr_debug(fmt, ##__VA_ARGS__); \
50} while (0)
51
52#define PNEIGH_HASHMASK 0xF
53
54static void neigh_timer_handler(struct timer_list *t);
55static void __neigh_notify(struct neighbour *n, int type, int flags,
56 u32 pid);
57static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 struct net_device *dev);
60
61#ifdef CONFIG_PROC_FS
62static const struct seq_operations neigh_stat_seq_ops;
63#endif
64
65/*
66 Neighbour hash table buckets are protected with rwlock tbl->lock.
67
68 - All the scans/updates to hash buckets MUST be made under this lock.
69 - NOTHING clever should be made under this lock: no callbacks
70 to protocol backends, no attempts to send something to network.
71 It will result in deadlocks, if backend/driver wants to use neighbour
72 cache.
73 - If the entry requires some non-trivial actions, increase
74 its reference count and release table lock.
75
76 Neighbour entries are protected:
77 - with reference count.
78 - with rwlock neigh->lock
79
80 Reference count prevents destruction.
81
82 neigh->lock mainly serializes ll address data and its validity state.
83 However, the same lock is used to protect another entry fields:
84 - timer
85 - resolution queue
86
87 Again, nothing clever shall be made under neigh->lock,
88 the most complicated procedure, which we allow is dev->hard_header.
89 It is supposed, that dev->hard_header is simplistic and does
90 not make callbacks to neighbour tables.
91 */
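/* Illustrative sketch of the rule above (not a real code path in this
 * file): a scan that needs to do anything non-trivial with an entry takes
 * a reference and drops the table lock first, e.g.
 *
 *	read_lock_bh(&tbl->lock);
 *	... locate n in its bucket ...
 *	neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *	... now it is safe to call into drivers or transmit using n ...
 *	neigh_release(n);
 */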
92
93static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94{
95 kfree_skb(skb);
96 return -ENETDOWN;
97}
98
99static void neigh_cleanup_and_release(struct neighbour *neigh)
100{
101 if (neigh->parms->neigh_cleanup)
102 neigh->parms->neigh_cleanup(neigh);
103
104 trace_neigh_cleanup_and_release(neigh, 0);
105 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
106 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
107 neigh_release(neigh);
108}
109
110/*
111 * Returns a value randomly distributed in the interval
112 * (1/2)*base ... (3/2)*base.  It corresponds to the default IPv6
113 * settings and is not overridable, because it is a really reasonable choice.
114 */
115
116unsigned long neigh_rand_reach_time(unsigned long base)
117{
118 return base ? (prandom_u32() % base) + (base >> 1) : 0;
119}
120EXPORT_SYMBOL(neigh_rand_reach_time);
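/* For example, with base = 30 * HZ the value returned above is uniformly
 * distributed in [15 * HZ, 45 * HZ), i.e. between base/2 and 3*base/2,
 * matching the ReachableTime jitter recommended for IPv6 neighbour
 * discovery.
 */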
121
122static void neigh_mark_dead(struct neighbour *n)
123{
124 n->dead = 1;
125 if (!list_empty(&n->gc_list)) {
126 list_del_init(&n->gc_list);
127 atomic_dec(&n->tbl->gc_entries);
128 }
129}
130
131static void neigh_update_gc_list(struct neighbour *n)
132{
133 bool on_gc_list, exempt_from_gc;
134
135 write_lock_bh(&n->tbl->lock);
136 write_lock(&n->lock);
137
138 /* remove from the gc list if new state is permanent or if neighbor
139 * is externally learned; otherwise entry should be on the gc list
140 */
141 exempt_from_gc = n->nud_state & NUD_PERMANENT ||
142 n->flags & NTF_EXT_LEARNED;
143 on_gc_list = !list_empty(&n->gc_list);
144
145 if (exempt_from_gc && on_gc_list) {
146 list_del_init(&n->gc_list);
147 atomic_dec(&n->tbl->gc_entries);
148 } else if (!exempt_from_gc && !on_gc_list) {
149 /* add entries to the tail; cleaning removes from the front */
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
152 }
153
154 write_unlock(&n->lock);
155 write_unlock_bh(&n->tbl->lock);
156}
157
158static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
159 int *notify)
160{
161 bool rc = false;
162 u8 ndm_flags;
163
164 if (!(flags & NEIGH_UPDATE_F_ADMIN))
165 return rc;
166
167 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
168 if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
169 if (ndm_flags & NTF_EXT_LEARNED)
170 neigh->flags |= NTF_EXT_LEARNED;
171 else
172 neigh->flags &= ~NTF_EXT_LEARNED;
173 rc = true;
174 *notify = 1;
175 }
176
177 return rc;
178}
179
180static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
181 struct neigh_table *tbl)
182{
183 bool retval = false;
184
185 write_lock(&n->lock);
186 if (refcount_read(&n->refcnt) == 1) {
187 struct neighbour *neigh;
188
189 neigh = rcu_dereference_protected(n->next,
190 lockdep_is_held(&tbl->lock));
191 rcu_assign_pointer(*np, neigh);
192 neigh_mark_dead(n);
193 retval = true;
194 }
195 write_unlock(&n->lock);
196 if (retval)
197 neigh_cleanup_and_release(n);
198 return retval;
199}
200
201bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
202{
203 struct neigh_hash_table *nht;
204 void *pkey = ndel->primary_key;
205 u32 hash_val;
206 struct neighbour *n;
207 struct neighbour __rcu **np;
208
209 nht = rcu_dereference_protected(tbl->nht,
210 lockdep_is_held(&tbl->lock));
211 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
212 hash_val = hash_val >> (32 - nht->hash_shift);
213
214 np = &nht->hash_buckets[hash_val];
215 while ((n = rcu_dereference_protected(*np,
216 lockdep_is_held(&tbl->lock)))) {
217 if (n == ndel)
218 return neigh_del(n, np, tbl);
219 np = &n->next;
220 }
221 return false;
222}
223
224static int neigh_forced_gc(struct neigh_table *tbl)
225{
226 int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
227 unsigned long tref = jiffies - 5 * HZ;
228 struct neighbour *n, *tmp;
229 int shrunk = 0;
230
231 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
232
233 write_lock_bh(&tbl->lock);
234
235 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
236 if (refcount_read(&n->refcnt) == 1) {
237 bool remove = false;
238
239 write_lock(&n->lock);
240 if ((n->nud_state == NUD_FAILED) ||
241 time_after(tref, n->updated))
242 remove = true;
243 write_unlock(&n->lock);
244
245 if (remove && neigh_remove_one(n, tbl))
246 shrunk++;
247 if (shrunk >= max_clean)
248 break;
249 }
250 }
251
252 tbl->last_flush = jiffies;
253
254 write_unlock_bh(&tbl->lock);
255
256 return shrunk;
257}
258
259static void neigh_add_timer(struct neighbour *n, unsigned long when)
260{
261 neigh_hold(n);
262 if (unlikely(mod_timer(&n->timer, when))) {
263 printk("NEIGH: BUG, double timer add, state is %x\n",
264 n->nud_state);
265 dump_stack();
266 }
267}
268
269static int neigh_del_timer(struct neighbour *n)
270{
271 if ((n->nud_state & NUD_IN_TIMER) &&
272 del_timer(&n->timer)) {
273 neigh_release(n);
274 return 1;
275 }
276 return 0;
277}
278
279static void pneigh_queue_purge(struct sk_buff_head *list)
280{
281 struct sk_buff *skb;
282
283 while ((skb = skb_dequeue(list)) != NULL) {
284 dev_put(skb->dev);
285 kfree_skb(skb);
286 }
287}
288
289static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
290 bool skip_perm)
291{
292 int i;
293 struct neigh_hash_table *nht;
294
295 nht = rcu_dereference_protected(tbl->nht,
296 lockdep_is_held(&tbl->lock));
297
298 for (i = 0; i < (1 << nht->hash_shift); i++) {
299 struct neighbour *n;
300 struct neighbour __rcu **np = &nht->hash_buckets[i];
301
302 while ((n = rcu_dereference_protected(*np,
303 lockdep_is_held(&tbl->lock))) != NULL) {
304 if (dev && n->dev != dev) {
305 np = &n->next;
306 continue;
307 }
308 if (skip_perm && n->nud_state & NUD_PERMANENT) {
309 np = &n->next;
310 continue;
311 }
312 rcu_assign_pointer(*np,
313 rcu_dereference_protected(n->next,
314 lockdep_is_held(&tbl->lock)));
315 write_lock(&n->lock);
316 neigh_del_timer(n);
317 neigh_mark_dead(n);
318 if (refcount_read(&n->refcnt) != 1) {
319				/* The most unpleasant situation:
320				   we must destroy the neighbour entry,
321				   but someone still uses it.
322
323				   Destruction will be delayed until
324				   the last user releases it, but
325				   we must kill timers etc. and move
326				   it to a safe state.
327				*/
328 __skb_queue_purge(&n->arp_queue);
329 n->arp_queue_len_bytes = 0;
330 n->output = neigh_blackhole;
331 if (n->nud_state & NUD_VALID)
332 n->nud_state = NUD_NOARP;
333 else
334 n->nud_state = NUD_NONE;
335 neigh_dbg(2, "neigh %p is stray\n", n);
336 }
337 write_unlock(&n->lock);
338 neigh_cleanup_and_release(n);
339 }
340 }
341}
342
343void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
344{
345 write_lock_bh(&tbl->lock);
346 neigh_flush_dev(tbl, dev, false);
347 write_unlock_bh(&tbl->lock);
348}
349EXPORT_SYMBOL(neigh_changeaddr);
350
351static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
352 bool skip_perm)
353{
354 write_lock_bh(&tbl->lock);
355 neigh_flush_dev(tbl, dev, skip_perm);
356 pneigh_ifdown_and_unlock(tbl, dev);
357
358 del_timer_sync(&tbl->proxy_timer);
359 pneigh_queue_purge(&tbl->proxy_queue);
360 return 0;
361}
362
363int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
364{
365 __neigh_ifdown(tbl, dev, true);
366 return 0;
367}
368EXPORT_SYMBOL(neigh_carrier_down);
369
370int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
371{
372 __neigh_ifdown(tbl, dev, false);
373 return 0;
374}
375EXPORT_SYMBOL(neigh_ifdown);
376
377static struct neighbour *neigh_alloc(struct neigh_table *tbl,
378 struct net_device *dev,
379 bool exempt_from_gc)
380{
381 struct neighbour *n = NULL;
382 unsigned long now = jiffies;
383 int entries;
384
385 if (exempt_from_gc)
386 goto do_alloc;
387
388 entries = atomic_inc_return(&tbl->gc_entries) - 1;
389 if (entries >= tbl->gc_thresh3 ||
390 (entries >= tbl->gc_thresh2 &&
391 time_after(now, tbl->last_flush + 5 * HZ))) {
392 if (!neigh_forced_gc(tbl) &&
393 entries >= tbl->gc_thresh3) {
394 net_info_ratelimited("%s: neighbor table overflow!\n",
395 tbl->id);
396 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
397 goto out_entries;
398 }
399 }
400
401do_alloc:
402 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
403 if (!n)
404 goto out_entries;
405
406 __skb_queue_head_init(&n->arp_queue);
407 rwlock_init(&n->lock);
408 seqlock_init(&n->ha_lock);
409 n->updated = n->used = now;
410 n->nud_state = NUD_NONE;
411 n->output = neigh_blackhole;
412 seqlock_init(&n->hh.hh_lock);
413 n->parms = neigh_parms_clone(&tbl->parms);
414 timer_setup(&n->timer, neigh_timer_handler, 0);
415
416 NEIGH_CACHE_STAT_INC(tbl, allocs);
417 n->tbl = tbl;
418 refcount_set(&n->refcnt, 1);
419 n->dead = 1;
420 INIT_LIST_HEAD(&n->gc_list);
421
422 atomic_inc(&tbl->entries);
423out:
424 return n;
425
426out_entries:
427 if (!exempt_from_gc)
428 atomic_dec(&tbl->gc_entries);
429 goto out;
430}
431
432static void neigh_get_hash_rnd(u32 *x)
433{
434 *x = get_random_u32() | 1;
435}
436
437static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
438{
439 size_t size = (1 << shift) * sizeof(struct neighbour *);
440 struct neigh_hash_table *ret;
441 struct neighbour __rcu **buckets;
442 int i;
443
444 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
445 if (!ret)
446 return NULL;
447 if (size <= PAGE_SIZE) {
448 buckets = kzalloc(size, GFP_ATOMIC);
449 } else {
450 buckets = (struct neighbour __rcu **)
451 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
452 get_order(size));
453 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
454 }
455 if (!buckets) {
456 kfree(ret);
457 return NULL;
458 }
459 ret->hash_buckets = buckets;
460 ret->hash_shift = shift;
461 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
462 neigh_get_hash_rnd(&ret->hash_rnd[i]);
463 return ret;
464}
465
466static void neigh_hash_free_rcu(struct rcu_head *head)
467{
468 struct neigh_hash_table *nht = container_of(head,
469 struct neigh_hash_table,
470 rcu);
471 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
472 struct neighbour __rcu **buckets = nht->hash_buckets;
473
474 if (size <= PAGE_SIZE) {
475 kfree(buckets);
476 } else {
477 kmemleak_free(buckets);
478 free_pages((unsigned long)buckets, get_order(size));
479 }
480 kfree(nht);
481}
482
483static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
484 unsigned long new_shift)
485{
486 unsigned int i, hash;
487 struct neigh_hash_table *new_nht, *old_nht;
488
489 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
490
491 old_nht = rcu_dereference_protected(tbl->nht,
492 lockdep_is_held(&tbl->lock));
493 new_nht = neigh_hash_alloc(new_shift);
494 if (!new_nht)
495 return old_nht;
496
497 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
498 struct neighbour *n, *next;
499
500 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
501 lockdep_is_held(&tbl->lock));
502 n != NULL;
503 n = next) {
504 hash = tbl->hash(n->primary_key, n->dev,
505 new_nht->hash_rnd);
506
507 hash >>= (32 - new_nht->hash_shift);
508 next = rcu_dereference_protected(n->next,
509 lockdep_is_held(&tbl->lock));
510
511 rcu_assign_pointer(n->next,
512 rcu_dereference_protected(
513 new_nht->hash_buckets[hash],
514 lockdep_is_held(&tbl->lock)));
515 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
516 }
517 }
518
519 rcu_assign_pointer(tbl->nht, new_nht);
520 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
521 return new_nht;
522}
523
524struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
525 struct net_device *dev)
526{
527 struct neighbour *n;
528
529 NEIGH_CACHE_STAT_INC(tbl, lookups);
530
531 rcu_read_lock_bh();
532 n = __neigh_lookup_noref(tbl, pkey, dev);
533 if (n) {
534 if (!refcount_inc_not_zero(&n->refcnt))
535 n = NULL;
536 NEIGH_CACHE_STAT_INC(tbl, hits);
537 }
538
539 rcu_read_unlock_bh();
540 return n;
541}
542EXPORT_SYMBOL(neigh_lookup);
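/* Usage sketch (illustrative, ARP-style caller assumed): a successful
 * lookup returns the entry with a reference held, so the caller must
 * release it:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
 *	if (n) {
 *		... read n->ha / n->nud_state under read_lock_bh(&n->lock) ...
 *		neigh_release(n);
 *	}
 */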
543
544struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
545 const void *pkey)
546{
547 struct neighbour *n;
548 unsigned int key_len = tbl->key_len;
549 u32 hash_val;
550 struct neigh_hash_table *nht;
551
552 NEIGH_CACHE_STAT_INC(tbl, lookups);
553
554 rcu_read_lock_bh();
555 nht = rcu_dereference_bh(tbl->nht);
556 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
557
558 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
559 n != NULL;
560 n = rcu_dereference_bh(n->next)) {
561 if (!memcmp(n->primary_key, pkey, key_len) &&
562 net_eq(dev_net(n->dev), net)) {
563 if (!refcount_inc_not_zero(&n->refcnt))
564 n = NULL;
565 NEIGH_CACHE_STAT_INC(tbl, hits);
566 break;
567 }
568 }
569
570 rcu_read_unlock_bh();
571 return n;
572}
573EXPORT_SYMBOL(neigh_lookup_nodev);
574
575static struct neighbour *___neigh_create(struct neigh_table *tbl,
576 const void *pkey,
577 struct net_device *dev,
578 bool exempt_from_gc, bool want_ref)
579{
580 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
581 u32 hash_val;
582 unsigned int key_len = tbl->key_len;
583 int error;
584 struct neigh_hash_table *nht;
585
586 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
587
588 if (!n) {
589 rc = ERR_PTR(-ENOBUFS);
590 goto out;
591 }
592
593 memcpy(n->primary_key, pkey, key_len);
594 n->dev = dev;
595 dev_hold(dev);
596
597 /* Protocol specific setup. */
598 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
599 rc = ERR_PTR(error);
600 goto out_neigh_release;
601 }
602
603 if (dev->netdev_ops->ndo_neigh_construct) {
604 error = dev->netdev_ops->ndo_neigh_construct(dev, n);
605 if (error < 0) {
606 rc = ERR_PTR(error);
607 goto out_neigh_release;
608 }
609 }
610
611 /* Device specific setup. */
612 if (n->parms->neigh_setup &&
613 (error = n->parms->neigh_setup(n)) < 0) {
614 rc = ERR_PTR(error);
615 goto out_neigh_release;
616 }
617
618 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
619
620 write_lock_bh(&tbl->lock);
621 nht = rcu_dereference_protected(tbl->nht,
622 lockdep_is_held(&tbl->lock));
623
624 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
625 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
626
627 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
628
629 if (n->parms->dead) {
630 rc = ERR_PTR(-EINVAL);
631 goto out_tbl_unlock;
632 }
633
634 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
635 lockdep_is_held(&tbl->lock));
636 n1 != NULL;
637 n1 = rcu_dereference_protected(n1->next,
638 lockdep_is_held(&tbl->lock))) {
639 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
640 if (want_ref)
641 neigh_hold(n1);
642 rc = n1;
643 goto out_tbl_unlock;
644 }
645 }
646
647 n->dead = 0;
648 if (!exempt_from_gc)
649 list_add_tail(&n->gc_list, &n->tbl->gc_list);
650
651 if (want_ref)
652 neigh_hold(n);
653 rcu_assign_pointer(n->next,
654 rcu_dereference_protected(nht->hash_buckets[hash_val],
655 lockdep_is_held(&tbl->lock)));
656 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
657 write_unlock_bh(&tbl->lock);
658 neigh_dbg(2, "neigh %p is created\n", n);
659 rc = n;
660out:
661 return rc;
662out_tbl_unlock:
663 write_unlock_bh(&tbl->lock);
664out_neigh_release:
665 if (!exempt_from_gc)
666 atomic_dec(&tbl->gc_entries);
667 neigh_release(n);
668 goto out;
669}
670
671struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
672 struct net_device *dev, bool want_ref)
673{
674 return ___neigh_create(tbl, pkey, dev, false, want_ref);
675}
676EXPORT_SYMBOL(__neigh_create);
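/* Usage sketch (illustrative): output paths usually try a lockless lookup
 * first and fall back to creation, roughly:
 *
 *	n = __neigh_lookup_noref(tbl, &nexthop, dev);
 *	if (unlikely(!n))
 *		n = __neigh_create(tbl, &nexthop, dev, false);
 *
 * want_ref = false is appropriate when the entry is only used inside the
 * current rcu_read_lock_bh() section; pass true when the caller keeps a
 * longer-lived reference that it will later drop with neigh_release().
 */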
677
678static u32 pneigh_hash(const void *pkey, unsigned int key_len)
679{
680 u32 hash_val = *(u32 *)(pkey + key_len - 4);
681 hash_val ^= (hash_val >> 16);
682 hash_val ^= hash_val >> 8;
683 hash_val ^= hash_val >> 4;
684 hash_val &= PNEIGH_HASHMASK;
685 return hash_val;
686}
687
688static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
689 struct net *net,
690 const void *pkey,
691 unsigned int key_len,
692 struct net_device *dev)
693{
694 while (n) {
695 if (!memcmp(n->key, pkey, key_len) &&
696 net_eq(pneigh_net(n), net) &&
697 (n->dev == dev || !n->dev))
698 return n;
699 n = n->next;
700 }
701 return NULL;
702}
703
704struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
705 struct net *net, const void *pkey, struct net_device *dev)
706{
707 unsigned int key_len = tbl->key_len;
708 u32 hash_val = pneigh_hash(pkey, key_len);
709
710 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
711 net, pkey, key_len, dev);
712}
713EXPORT_SYMBOL_GPL(__pneigh_lookup);
714
715struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
716 struct net *net, const void *pkey,
717 struct net_device *dev, int creat)
718{
719 struct pneigh_entry *n;
720 unsigned int key_len = tbl->key_len;
721 u32 hash_val = pneigh_hash(pkey, key_len);
722
723 read_lock_bh(&tbl->lock);
724 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
725 net, pkey, key_len, dev);
726 read_unlock_bh(&tbl->lock);
727
728 if (n || !creat)
729 goto out;
730
731 ASSERT_RTNL();
732
733 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
734 if (!n)
735 goto out;
736
737 n->protocol = 0;
738 write_pnet(&n->net, net);
739 memcpy(n->key, pkey, key_len);
740 n->dev = dev;
741 if (dev)
742 dev_hold(dev);
743
744 if (tbl->pconstructor && tbl->pconstructor(n)) {
745 if (dev)
746 dev_put(dev);
747 kfree(n);
748 n = NULL;
749 goto out;
750 }
751
752 write_lock_bh(&tbl->lock);
753 n->next = tbl->phash_buckets[hash_val];
754 tbl->phash_buckets[hash_val] = n;
755 write_unlock_bh(&tbl->lock);
756out:
757 return n;
758}
759EXPORT_SYMBOL(pneigh_lookup);
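/* Usage sketch (illustrative): proxy entries are normally created from the
 * netlink/ioctl path with creat = 1 (which requires the RTNL lock, see the
 * ASSERT_RTNL() above) and consulted from the receive path with creat = 0,
 * e.g. something along the lines of:
 *
 *	if (pneigh_lookup(&arp_tbl, net, &target_ip, dev, 0))
 *		... reply to the request on behalf of target_ip ...
 */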
760
761
762int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
763 struct net_device *dev)
764{
765 struct pneigh_entry *n, **np;
766 unsigned int key_len = tbl->key_len;
767 u32 hash_val = pneigh_hash(pkey, key_len);
768
769 write_lock_bh(&tbl->lock);
770 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
771 np = &n->next) {
772 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
773 net_eq(pneigh_net(n), net)) {
774 *np = n->next;
775 write_unlock_bh(&tbl->lock);
776 if (tbl->pdestructor)
777 tbl->pdestructor(n);
778 if (n->dev)
779 dev_put(n->dev);
780 kfree(n);
781 return 0;
782 }
783 }
784 write_unlock_bh(&tbl->lock);
785 return -ENOENT;
786}
787
788static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
789 struct net_device *dev)
790{
791 struct pneigh_entry *n, **np, *freelist = NULL;
792 u32 h;
793
794 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
795 np = &tbl->phash_buckets[h];
796 while ((n = *np) != NULL) {
797 if (!dev || n->dev == dev) {
798 *np = n->next;
799 n->next = freelist;
800 freelist = n;
801 continue;
802 }
803 np = &n->next;
804 }
805 }
806 write_unlock_bh(&tbl->lock);
807 while ((n = freelist)) {
808 freelist = n->next;
809 n->next = NULL;
810 if (tbl->pdestructor)
811 tbl->pdestructor(n);
812 if (n->dev)
813 dev_put(n->dev);
814 kfree(n);
815 }
816 return -ENOENT;
817}
818
819static void neigh_parms_destroy(struct neigh_parms *parms);
820
821static inline void neigh_parms_put(struct neigh_parms *parms)
822{
823 if (refcount_dec_and_test(&parms->refcnt))
824 neigh_parms_destroy(parms);
825}
826
827/*
828 * The neighbour must already be out of the table; it is destroyed
829 * once the last reference has been dropped (see neigh_release()).
830 */
831void neigh_destroy(struct neighbour *neigh)
832{
833 struct net_device *dev = neigh->dev;
834
835 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
836
837 if (!neigh->dead) {
838 pr_warn("Destroying alive neighbour %p\n", neigh);
839 dump_stack();
840 return;
841 }
842
843 if (neigh_del_timer(neigh))
844 pr_warn("Impossible event\n");
845
846 write_lock_bh(&neigh->lock);
847 __skb_queue_purge(&neigh->arp_queue);
848 write_unlock_bh(&neigh->lock);
849 neigh->arp_queue_len_bytes = 0;
850
851 if (dev->netdev_ops->ndo_neigh_destroy)
852 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
853
854 dev_put(dev);
855 neigh_parms_put(neigh->parms);
856
857 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
858
859 atomic_dec(&neigh->tbl->entries);
860 kfree_rcu(neigh, rcu);
861}
862EXPORT_SYMBOL(neigh_destroy);
863
864/* Neighbour state is suspicious;
865 disable fast path.
866
867 Called with write_locked neigh.
868 */
869static void neigh_suspect(struct neighbour *neigh)
870{
871 neigh_dbg(2, "neigh %p is suspected\n", neigh);
872
873 neigh->output = neigh->ops->output;
874}
875
876/* Neighbour state is OK;
877 enable fast path.
878
879 Called with write_locked neigh.
880 */
881static void neigh_connect(struct neighbour *neigh)
882{
883 neigh_dbg(2, "neigh %p is connected\n", neigh);
884
885 neigh->output = neigh->ops->connected_output;
886}
887
888static void neigh_periodic_work(struct work_struct *work)
889{
890 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
891 struct neighbour *n;
892 struct neighbour __rcu **np;
893 unsigned int i;
894 struct neigh_hash_table *nht;
895
896 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
897
898 write_lock_bh(&tbl->lock);
899 nht = rcu_dereference_protected(tbl->nht,
900 lockdep_is_held(&tbl->lock));
901
902 /*
903 * periodically recompute ReachableTime from random function
904 */
905
906 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
907 struct neigh_parms *p;
908 tbl->last_rand = jiffies;
909 list_for_each_entry(p, &tbl->parms_list, list)
910 p->reachable_time =
911 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
912 }
913
914 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
915 goto out;
916
917 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
918 np = &nht->hash_buckets[i];
919
920 while ((n = rcu_dereference_protected(*np,
921 lockdep_is_held(&tbl->lock))) != NULL) {
922 unsigned int state;
923
924 write_lock(&n->lock);
925
926 state = n->nud_state;
927 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
928 (n->flags & NTF_EXT_LEARNED)) {
929 write_unlock(&n->lock);
930 goto next_elt;
931 }
932
933 if (time_before(n->used, n->confirmed))
934 n->used = n->confirmed;
935
936 if (refcount_read(&n->refcnt) == 1 &&
937 (state == NUD_FAILED ||
938 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
939 *np = n->next;
940 neigh_mark_dead(n);
941 write_unlock(&n->lock);
942 neigh_cleanup_and_release(n);
943 continue;
944 }
945 write_unlock(&n->lock);
946
947next_elt:
948 np = &n->next;
949 }
950 /*
951 * It's fine to release lock here, even if hash table
952 * grows while we are preempted.
953 */
954 write_unlock_bh(&tbl->lock);
955 cond_resched();
956 write_lock_bh(&tbl->lock);
957 nht = rcu_dereference_protected(tbl->nht,
958 lockdep_is_held(&tbl->lock));
959 }
960out:
961 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
962 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
963 * BASE_REACHABLE_TIME.
964 */
965 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
966 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
967 write_unlock_bh(&tbl->lock);
968}
969
970static __inline__ int neigh_max_probes(struct neighbour *n)
971{
972 struct neigh_parms *p = n->parms;
973 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
974 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
975 NEIGH_VAR(p, MCAST_PROBES));
976}
977
978static void neigh_invalidate(struct neighbour *neigh)
979 __releases(neigh->lock)
980 __acquires(neigh->lock)
981{
982 struct sk_buff *skb;
983
984 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
985 neigh_dbg(2, "neigh %p is failed\n", neigh);
986 neigh->updated = jiffies;
987
988	/* This is a very delicate spot: error_report() is a complicated
989	   routine and may even hit this same neighbour entry again!
990
991	   So we try to be careful and avoid an endless loop. --ANK
992	 */
993 while (neigh->nud_state == NUD_FAILED &&
994 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
995 write_unlock(&neigh->lock);
996 neigh->ops->error_report(neigh, skb);
997 write_lock(&neigh->lock);
998 }
999 __skb_queue_purge(&neigh->arp_queue);
1000 neigh->arp_queue_len_bytes = 0;
1001}
1002
1003static void neigh_probe(struct neighbour *neigh)
1004 __releases(neigh->lock)
1005{
1006 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1007 /* keep skb alive even if arp_queue overflows */
1008 if (skb)
1009 skb = skb_clone(skb, GFP_ATOMIC);
1010 write_unlock(&neigh->lock);
1011 if (neigh->ops->solicit)
1012 neigh->ops->solicit(neigh, skb);
1013 atomic_inc(&neigh->probes);
1014 consume_skb(skb);
1015}
1016
1017/* Called when a timer expires for a neighbour entry. */
1018
1019static void neigh_timer_handler(struct timer_list *t)
1020{
1021 unsigned long now, next;
1022 struct neighbour *neigh = from_timer(neigh, t, timer);
1023 unsigned int state;
1024 int notify = 0;
1025
1026 write_lock(&neigh->lock);
1027
1028 state = neigh->nud_state;
1029 now = jiffies;
1030 next = now + HZ;
1031
1032 if (!(state & NUD_IN_TIMER))
1033 goto out;
1034
1035 if (state & NUD_REACHABLE) {
1036 if (time_before_eq(now,
1037 neigh->confirmed + neigh->parms->reachable_time)) {
1038 neigh_dbg(2, "neigh %p is still alive\n", neigh);
1039 next = neigh->confirmed + neigh->parms->reachable_time;
1040 } else if (time_before_eq(now,
1041 neigh->used +
1042 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1043 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1044 neigh->nud_state = NUD_DELAY;
1045 neigh->updated = jiffies;
1046 neigh_suspect(neigh);
1047 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1048 } else {
1049 neigh_dbg(2, "neigh %p is suspected\n", neigh);
1050 neigh->nud_state = NUD_STALE;
1051 neigh->updated = jiffies;
1052 neigh_suspect(neigh);
1053 notify = 1;
1054 }
1055 } else if (state & NUD_DELAY) {
1056 if (time_before_eq(now,
1057 neigh->confirmed +
1058 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1059 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1060 neigh->nud_state = NUD_REACHABLE;
1061 neigh->updated = jiffies;
1062 neigh_connect(neigh);
1063 notify = 1;
1064 next = neigh->confirmed + neigh->parms->reachable_time;
1065 } else {
1066 neigh_dbg(2, "neigh %p is probed\n", neigh);
1067 neigh->nud_state = NUD_PROBE;
1068 neigh->updated = jiffies;
1069 atomic_set(&neigh->probes, 0);
1070 notify = 1;
1071 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1072 }
1073 } else {
1074 /* NUD_PROBE|NUD_INCOMPLETE */
1075 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1076 }
1077
1078 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1079 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1080 neigh->nud_state = NUD_FAILED;
1081 notify = 1;
1082 neigh_invalidate(neigh);
1083 goto out;
1084 }
1085
1086 if (neigh->nud_state & NUD_IN_TIMER) {
1087 if (time_before(next, jiffies + HZ/2))
1088 next = jiffies + HZ/2;
1089 if (!mod_timer(&neigh->timer, next))
1090 neigh_hold(neigh);
1091 }
1092 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1093 neigh_probe(neigh);
1094 } else {
1095out:
1096 write_unlock(&neigh->lock);
1097 }
1098
1099 if (notify)
1100 neigh_update_notify(neigh, 0);
1101
1102 trace_neigh_timer_handler(neigh, 0);
1103
1104 neigh_release(neigh);
1105}
1106
1107int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1108{
1109 int rc;
1110 bool immediate_probe = false;
1111
1112 write_lock_bh(&neigh->lock);
1113
1114 rc = 0;
1115 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1116 goto out_unlock_bh;
1117 if (neigh->dead)
1118 goto out_dead;
1119
1120 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1121 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1122 NEIGH_VAR(neigh->parms, APP_PROBES)) {
1123 unsigned long next, now = jiffies;
1124
1125 atomic_set(&neigh->probes,
1126 NEIGH_VAR(neigh->parms, UCAST_PROBES));
1127 neigh_del_timer(neigh);
1128 neigh->nud_state = NUD_INCOMPLETE;
1129 neigh->updated = now;
1130 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1131 HZ/2);
1132 neigh_add_timer(neigh, next);
1133 immediate_probe = true;
1134 } else {
1135 neigh->nud_state = NUD_FAILED;
1136 neigh->updated = jiffies;
1137 write_unlock_bh(&neigh->lock);
1138
1139 kfree_skb(skb);
1140 return 1;
1141 }
1142 } else if (neigh->nud_state & NUD_STALE) {
1143 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1144 neigh_del_timer(neigh);
1145 neigh->nud_state = NUD_DELAY;
1146 neigh->updated = jiffies;
1147 neigh_add_timer(neigh, jiffies +
1148 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1149 }
1150
1151 if (neigh->nud_state == NUD_INCOMPLETE) {
1152 if (skb) {
1153 while (neigh->arp_queue_len_bytes + skb->truesize >
1154 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1155 struct sk_buff *buff;
1156
1157 buff = __skb_dequeue(&neigh->arp_queue);
1158 if (!buff)
1159 break;
1160 neigh->arp_queue_len_bytes -= buff->truesize;
1161 kfree_skb(buff);
1162 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1163 }
1164 skb_dst_force(skb);
1165 __skb_queue_tail(&neigh->arp_queue, skb);
1166 neigh->arp_queue_len_bytes += skb->truesize;
1167 }
1168 rc = 1;
1169 }
1170out_unlock_bh:
1171 if (immediate_probe)
1172 neigh_probe(neigh);
1173 else
1174 write_unlock(&neigh->lock);
1175 local_bh_enable();
1176 trace_neigh_event_send_done(neigh, rc);
1177 return rc;
1178
1179out_dead:
1180 if (neigh->nud_state & NUD_STALE)
1181 goto out_unlock_bh;
1182 write_unlock_bh(&neigh->lock);
1183 kfree_skb(skb);
1184 trace_neigh_event_send_dead(neigh, 1);
1185 return 1;
1186}
1187EXPORT_SYMBOL(__neigh_event_send);
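/* Example (illustrative sketch, not taken from an in-tree caller): how the
 * return value of the neigh_event_send() wrapper from <net/neighbour.h> is
 * meant to be handled, mirroring neigh_resolve_output() below.
 * my_build_l2_header() is a hypothetical helper.
 *
 *	if (!neigh_event_send(neigh, skb)) {
 *		// 0: the entry is usable now; build the header and transmit
 *		my_build_l2_header(skb, neigh->ha);
 *		return dev_queue_xmit(skb);
 *	}
 *	// non-zero: the skb was queued on the resolution queue (or dropped
 *	// on failure) and will be flushed once the entry becomes valid.
 *	return 0;
 */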
1188
1189static void neigh_update_hhs(struct neighbour *neigh)
1190{
1191 struct hh_cache *hh;
1192 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1193 = NULL;
1194
1195 if (neigh->dev->header_ops)
1196 update = neigh->dev->header_ops->cache_update;
1197
1198 if (update) {
1199 hh = &neigh->hh;
1200 if (hh->hh_len) {
1201 write_seqlock_bh(&hh->hh_lock);
1202 update(hh, neigh->dev, neigh->ha);
1203 write_sequnlock_bh(&hh->hh_lock);
1204 }
1205 }
1206}
1207
1208
1209
1210/* Generic update routine.
1211 -- lladdr is the new lladdr, or NULL if it is not supplied.
1212 -- new is the new state.
1213 -- flags
1214 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1215 if it is different.
1216 NEIGH_UPDATE_F_WEAK_OVERRIDE suspects an existing "connected"
1217 lladdr instead of overriding it,
1218 if it is different.
1219 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1220
1221 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1222 NTF_ROUTER flag.
1223 NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known as
1224 a router.
1225
1226 The caller MUST hold a reference count on the entry.
1227 */
1228
1229static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1230 u8 new, u32 flags, u32 nlmsg_pid,
1231 struct netlink_ext_ack *extack)
1232{
1233 bool ext_learn_change = false;
1234 u8 old;
1235 int err;
1236 int notify = 0;
1237 struct net_device *dev;
1238 int update_isrouter = 0;
1239
1240 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1241
1242 write_lock_bh(&neigh->lock);
1243
1244 dev = neigh->dev;
1245 old = neigh->nud_state;
1246 err = -EPERM;
1247
1248 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1249 (old & (NUD_NOARP | NUD_PERMANENT)))
1250 goto out;
1251 if (neigh->dead) {
1252 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1253 goto out;
1254 }
1255
1256 ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
1257
1258 if (!(new & NUD_VALID)) {
1259 neigh_del_timer(neigh);
1260 if (old & NUD_CONNECTED)
1261 neigh_suspect(neigh);
1262 neigh->nud_state = new;
1263 err = 0;
1264 notify = old & NUD_VALID;
1265 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1266 (new & NUD_FAILED)) {
1267 neigh_invalidate(neigh);
1268 notify = 1;
1269 }
1270 goto out;
1271 }
1272
1273 /* Compare new lladdr with cached one */
1274 if (!dev->addr_len) {
1275 /* First case: device needs no address. */
1276 lladdr = neigh->ha;
1277 } else if (lladdr) {
1278 /* The second case: if something is already cached
1279 and a new address is proposed:
1280 - compare new & old
1281 - if they are different, check override flag
1282 */
1283 if ((old & NUD_VALID) &&
1284 !memcmp(lladdr, neigh->ha, dev->addr_len))
1285 lladdr = neigh->ha;
1286 } else {
1287 /* No address is supplied; if we know something,
1288 use it, otherwise discard the request.
1289 */
1290 err = -EINVAL;
1291 if (!(old & NUD_VALID)) {
1292 NL_SET_ERR_MSG(extack, "No link layer address given");
1293 goto out;
1294 }
1295 lladdr = neigh->ha;
1296 }
1297
1298 /* Update the confirmed timestamp for the neighbour entry after we
1299 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1300 */
1301 if (new & NUD_CONNECTED)
1302 neigh->confirmed = jiffies;
1303
1304 /* If the entry was valid and the address has not changed,
1305 do not change the entry state if the new one is STALE.
1306 */
1307 err = 0;
1308 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1309 if (old & NUD_VALID) {
1310 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1311 update_isrouter = 0;
1312 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1313 (old & NUD_CONNECTED)) {
1314 lladdr = neigh->ha;
1315 new = NUD_STALE;
1316 } else
1317 goto out;
1318 } else {
1319 if (lladdr == neigh->ha && new == NUD_STALE &&
1320 !(flags & NEIGH_UPDATE_F_ADMIN))
1321 new = old;
1322 }
1323 }
1324
1325 /* Update the timestamp only once we know we will make a change to the
1326 * neighbour entry. Otherwise we risk moving the locktime window with
1327 * no-op updates and ignoring relevant ARP updates.
1328 */
1329 if (new != old || lladdr != neigh->ha)
1330 neigh->updated = jiffies;
1331
1332 if (new != old) {
1333 neigh_del_timer(neigh);
1334 if (new & NUD_PROBE)
1335 atomic_set(&neigh->probes, 0);
1336 if (new & NUD_IN_TIMER)
1337 neigh_add_timer(neigh, (jiffies +
1338 ((new & NUD_REACHABLE) ?
1339 neigh->parms->reachable_time :
1340 0)));
1341 neigh->nud_state = new;
1342 notify = 1;
1343 }
1344
1345 if (lladdr != neigh->ha) {
1346 write_seqlock(&neigh->ha_lock);
1347 memcpy(&neigh->ha, lladdr, dev->addr_len);
1348 write_sequnlock(&neigh->ha_lock);
1349 neigh_update_hhs(neigh);
1350 if (!(new & NUD_CONNECTED))
1351 neigh->confirmed = jiffies -
1352 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1353 notify = 1;
1354 }
1355 if (new == old)
1356 goto out;
1357 if (new & NUD_CONNECTED)
1358 neigh_connect(neigh);
1359 else
1360 neigh_suspect(neigh);
1361 if (!(old & NUD_VALID)) {
1362 struct sk_buff *skb;
1363
1364 /* Again: avoid an endless loop if something went wrong */
1365
1366 while (neigh->nud_state & NUD_VALID &&
1367 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1368 struct dst_entry *dst = skb_dst(skb);
1369 struct neighbour *n2, *n1 = neigh;
1370 write_unlock_bh(&neigh->lock);
1371
1372 rcu_read_lock();
1373
1374 /* Why not just use 'neigh' as-is? The problem is that
1375 * things such as shaper, eql, and sch_teql can end up
1376 * using alternative neigh objects to output the packet
1377 * in the output path. So what we need to do here is
1378 * re-look up the top-level neigh in the path so that
1379 * we can reinject the packet there.
1380 */
1381 n2 = NULL;
1382 if (dst) {
1383 n2 = dst_neigh_lookup_skb(dst, skb);
1384 if (n2)
1385 n1 = n2;
1386 }
1387 n1->output(n1, skb);
1388 if (n2)
1389 neigh_release(n2);
1390 rcu_read_unlock();
1391
1392 write_lock_bh(&neigh->lock);
1393 }
1394 __skb_queue_purge(&neigh->arp_queue);
1395 neigh->arp_queue_len_bytes = 0;
1396 }
1397out:
1398 if (update_isrouter)
1399 neigh_update_is_router(neigh, flags, &notify);
1400 write_unlock_bh(&neigh->lock);
1401
1402 if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1403 neigh_update_gc_list(neigh);
1404
1405 if (notify)
1406 neigh_update_notify(neigh, nlmsg_pid);
1407
1408 trace_neigh_update_done(neigh, err);
1409
1410 return err;
1411}
1412
1413int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1414 u32 flags, u32 nlmsg_pid)
1415{
1416 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1417}
1418EXPORT_SYMBOL(neigh_update);
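/* Example (illustrative sketch): how a receive path typically feeds a
 * learned link-layer address into the cache.  The flag combination here is
 * just one plausible choice, not a requirement of the API; administrative
 * deletion instead goes through NUD_FAILED, as in neigh_delete() below.
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
 */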
1419
1420/* Update the neigh to listen temporarily for probe responses, even if it is
1421 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1422 */
1423void __neigh_set_probe_once(struct neighbour *neigh)
1424{
1425 if (neigh->dead)
1426 return;
1427 neigh->updated = jiffies;
1428 if (!(neigh->nud_state & NUD_FAILED))
1429 return;
1430 neigh->nud_state = NUD_INCOMPLETE;
1431 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1432 neigh_add_timer(neigh,
1433 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1434}
1435EXPORT_SYMBOL(__neigh_set_probe_once);
1436
1437struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1438 u8 *lladdr, void *saddr,
1439 struct net_device *dev)
1440{
1441 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1442 lladdr || !dev->addr_len);
1443 if (neigh)
1444 neigh_update(neigh, lladdr, NUD_STALE,
1445 NEIGH_UPDATE_F_OVERRIDE, 0);
1446 return neigh;
1447}
1448EXPORT_SYMBOL(neigh_event_ns);
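/* Example (illustrative sketch, roughly what ARP does when it receives a
 * request): create or refresh the sender's entry as STALE.  sha/sip stand
 * for the sender hardware and IP addresses taken from the packet.
 *
 *	n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 *	if (n)
 *		neigh_release(n);
 */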
1449
1450/* Serializes hh_cache initialization; takes n->lock for writing itself. */
1451static void neigh_hh_init(struct neighbour *n)
1452{
1453 struct net_device *dev = n->dev;
1454 __be16 prot = n->tbl->protocol;
1455 struct hh_cache *hh = &n->hh;
1456
1457 write_lock_bh(&n->lock);
1458
1459 /* Only one thread can come in here and initialize the
1460 * hh_cache entry.
1461 */
1462 if (!hh->hh_len)
1463 dev->header_ops->cache(n, hh, prot);
1464
1465 write_unlock_bh(&n->lock);
1466}
1467
1468/* Slow and careful. */
1469
1470int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1471{
1472 int rc = 0;
1473
1474 if (!neigh_event_send(neigh, skb)) {
1475 int err;
1476 struct net_device *dev = neigh->dev;
1477 unsigned int seq;
1478
1479 if (dev->header_ops->cache && !neigh->hh.hh_len)
1480 neigh_hh_init(neigh);
1481
1482 do {
1483 __skb_pull(skb, skb_network_offset(skb));
1484 seq = read_seqbegin(&neigh->ha_lock);
1485 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1486 neigh->ha, NULL, skb->len);
1487 } while (read_seqretry(&neigh->ha_lock, seq));
1488
1489 if (err >= 0)
1490 rc = dev_queue_xmit(skb);
1491 else
1492 goto out_kfree_skb;
1493 }
1494out:
1495 return rc;
1496out_kfree_skb:
1497 rc = -EINVAL;
1498 kfree_skb(skb);
1499 goto out;
1500}
1501EXPORT_SYMBOL(neigh_resolve_output);
1502
1503/* As fast as possible without hh cache */
1504
1505int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1506{
1507 struct net_device *dev = neigh->dev;
1508 unsigned int seq;
1509 int err;
1510
1511 do {
1512 __skb_pull(skb, skb_network_offset(skb));
1513 seq = read_seqbegin(&neigh->ha_lock);
1514 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1515 neigh->ha, NULL, skb->len);
1516 } while (read_seqretry(&neigh->ha_lock, seq));
1517
1518 if (err >= 0)
1519 err = dev_queue_xmit(skb);
1520 else {
1521 err = -EINVAL;
1522 kfree_skb(skb);
1523 }
1524 return err;
1525}
1526EXPORT_SYMBOL(neigh_connected_output);
1527
1528int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1529{
1530 return dev_queue_xmit(skb);
1531}
1532EXPORT_SYMBOL(neigh_direct_output);
1533
1534static void neigh_proxy_process(struct timer_list *t)
1535{
1536 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1537 long sched_next = 0;
1538 unsigned long now = jiffies;
1539 struct sk_buff *skb, *n;
1540
1541 spin_lock(&tbl->proxy_queue.lock);
1542
1543 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1544 long tdif = NEIGH_CB(skb)->sched_next - now;
1545
1546 if (tdif <= 0) {
1547 struct net_device *dev = skb->dev;
1548
1549 __skb_unlink(skb, &tbl->proxy_queue);
1550 if (tbl->proxy_redo && netif_running(dev)) {
1551 rcu_read_lock();
1552 tbl->proxy_redo(skb);
1553 rcu_read_unlock();
1554 } else {
1555 kfree_skb(skb);
1556 }
1557
1558 dev_put(dev);
1559 } else if (!sched_next || tdif < sched_next)
1560 sched_next = tdif;
1561 }
1562 del_timer(&tbl->proxy_timer);
1563 if (sched_next)
1564 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1565 spin_unlock(&tbl->proxy_queue.lock);
1566}
1567
1568void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1569 struct sk_buff *skb)
1570{
1571 unsigned long now = jiffies;
1572
1573 unsigned long sched_next = now + (prandom_u32() %
1574 NEIGH_VAR(p, PROXY_DELAY));
1575
1576 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1577 kfree_skb(skb);
1578 return;
1579 }
1580
1581 NEIGH_CB(skb)->sched_next = sched_next;
1582 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1583
1584 spin_lock(&tbl->proxy_queue.lock);
1585 if (del_timer(&tbl->proxy_timer)) {
1586 if (time_before(tbl->proxy_timer.expires, sched_next))
1587 sched_next = tbl->proxy_timer.expires;
1588 }
1589 skb_dst_drop(skb);
1590 dev_hold(skb->dev);
1591 __skb_queue_tail(&tbl->proxy_queue, skb);
1592 mod_timer(&tbl->proxy_timer, sched_next);
1593 spin_unlock(&tbl->proxy_queue.lock);
1594}
1595EXPORT_SYMBOL(pneigh_enqueue);
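/* Example (illustrative sketch of the proxy-delay path, assuming an
 * ARP-like caller): when PROXY_DELAY is configured, the reply is not sent
 * inline but parked on the proxy queue and replayed later by
 * neigh_proxy_process() through tbl->proxy_redo.
 *
 *	if (NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY)) {
 *		pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
 *		return;		// skb is now owned by the proxy queue
 *	}
 */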
1596
1597static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1598 struct net *net, int ifindex)
1599{
1600 struct neigh_parms *p;
1601
1602 list_for_each_entry(p, &tbl->parms_list, list) {
1603 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1604 (!p->dev && !ifindex && net_eq(net, &init_net)))
1605 return p;
1606 }
1607
1608 return NULL;
1609}
1610
1611struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1612 struct neigh_table *tbl)
1613{
1614 struct neigh_parms *p;
1615 struct net *net = dev_net(dev);
1616 const struct net_device_ops *ops = dev->netdev_ops;
1617
1618 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1619 if (p) {
1620 p->tbl = tbl;
1621 refcount_set(&p->refcnt, 1);
1622 p->reachable_time =
1623 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1624 dev_hold(dev);
1625 p->dev = dev;
1626 write_pnet(&p->net, net);
1627 p->sysctl_table = NULL;
1628
1629 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1630 dev_put(dev);
1631 kfree(p);
1632 return NULL;
1633 }
1634
1635 write_lock_bh(&tbl->lock);
1636 list_add(&p->list, &tbl->parms.list);
1637 write_unlock_bh(&tbl->lock);
1638
1639 neigh_parms_data_state_cleanall(p);
1640 }
1641 return p;
1642}
1643EXPORT_SYMBOL(neigh_parms_alloc);
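/* Example (minimal lifetime sketch; my_tbl and priv->parms are
 * placeholders): a protocol allocates per-device parms when a device is
 * registered and releases them on teardown.
 *
 *	priv->parms = neigh_parms_alloc(dev, &my_tbl);	// NETDEV_REGISTER
 *	if (!priv->parms)
 *		return -ENOBUFS;
 *	...
 *	neigh_parms_release(&my_tbl, priv->parms);	// NETDEV_UNREGISTER
 */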
1644
1645static void neigh_rcu_free_parms(struct rcu_head *head)
1646{
1647 struct neigh_parms *parms =
1648 container_of(head, struct neigh_parms, rcu_head);
1649
1650 neigh_parms_put(parms);
1651}
1652
1653void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1654{
1655 if (!parms || parms == &tbl->parms)
1656 return;
1657 write_lock_bh(&tbl->lock);
1658 list_del(&parms->list);
1659 parms->dead = 1;
1660 write_unlock_bh(&tbl->lock);
1661 if (parms->dev)
1662 dev_put(parms->dev);
1663 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1664}
1665EXPORT_SYMBOL(neigh_parms_release);
1666
1667static void neigh_parms_destroy(struct neigh_parms *parms)
1668{
1669 kfree(parms);
1670}
1671
1672static struct lock_class_key neigh_table_proxy_queue_class;
1673
1674static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1675
1676void neigh_table_init(int index, struct neigh_table *tbl)
1677{
1678 unsigned long now = jiffies;
1679 unsigned long phsize;
1680
1681 INIT_LIST_HEAD(&tbl->parms_list);
1682 INIT_LIST_HEAD(&tbl->gc_list);
1683 list_add(&tbl->parms.list, &tbl->parms_list);
1684 write_pnet(&tbl->parms.net, &init_net);
1685 refcount_set(&tbl->parms.refcnt, 1);
1686 tbl->parms.reachable_time =
1687 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1688
1689 tbl->stats = alloc_percpu(struct neigh_statistics);
1690 if (!tbl->stats)
1691 panic("cannot create neighbour cache statistics");
1692
1693#ifdef CONFIG_PROC_FS
1694 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1695 &neigh_stat_seq_ops, tbl))
1696 panic("cannot create neighbour proc dir entry");
1697#endif
1698
1699 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1700
1701 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1702 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1703
1704 if (!tbl->nht || !tbl->phash_buckets)
1705 panic("cannot allocate neighbour cache hashes");
1706
1707 if (!tbl->entry_size)
1708 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1709 tbl->key_len, NEIGH_PRIV_ALIGN);
1710 else
1711 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1712
1713 rwlock_init(&tbl->lock);
1714 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1715 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1716 tbl->parms.reachable_time);
1717 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1718 skb_queue_head_init_class(&tbl->proxy_queue,
1719 &neigh_table_proxy_queue_class);
1720
1721 tbl->last_flush = now;
1722 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1723
1724 neigh_tables[index] = tbl;
1725}
1726EXPORT_SYMBOL(neigh_table_init);
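/* Example (illustrative sketch): a protocol registers its table at init
 * time and, if it can be unloaded, clears it again on module exit, using
 * the NEIGH_*_TABLE indices also consulted by neigh_find_table() below.
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *	...
 *	neigh_table_clear(NEIGH_ND_TABLE, &nd_tbl);	// on cleanup
 */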
1727
1728int neigh_table_clear(int index, struct neigh_table *tbl)
1729{
1730 neigh_tables[index] = NULL;
1731 /* It is not clean... fix it so that the IPv6 module can be unloaded safely */
1732 cancel_delayed_work_sync(&tbl->gc_work);
1733 del_timer_sync(&tbl->proxy_timer);
1734 pneigh_queue_purge(&tbl->proxy_queue);
1735 neigh_ifdown(tbl, NULL);
1736 if (atomic_read(&tbl->entries))
1737 pr_crit("neighbour leakage\n");
1738
1739 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1740 neigh_hash_free_rcu);
1741 tbl->nht = NULL;
1742
1743 kfree(tbl->phash_buckets);
1744 tbl->phash_buckets = NULL;
1745
1746 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1747
1748 free_percpu(tbl->stats);
1749 tbl->stats = NULL;
1750
1751 return 0;
1752}
1753EXPORT_SYMBOL(neigh_table_clear);
1754
1755static struct neigh_table *neigh_find_table(int family)
1756{
1757 struct neigh_table *tbl = NULL;
1758
1759 switch (family) {
1760 case AF_INET:
1761 tbl = neigh_tables[NEIGH_ARP_TABLE];
1762 break;
1763 case AF_INET6:
1764 tbl = neigh_tables[NEIGH_ND_TABLE];
1765 break;
1766 case AF_DECnet:
1767 tbl = neigh_tables[NEIGH_DN_TABLE];
1768 break;
1769 }
1770
1771 return tbl;
1772}
1773
1774const struct nla_policy nda_policy[NDA_MAX+1] = {
1775 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1776 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1777 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1778 [NDA_PROBES] = { .type = NLA_U32 },
1779 [NDA_VLAN] = { .type = NLA_U16 },
1780 [NDA_PORT] = { .type = NLA_U16 },
1781 [NDA_VNI] = { .type = NLA_U32 },
1782 [NDA_IFINDEX] = { .type = NLA_U32 },
1783 [NDA_MASTER] = { .type = NLA_U32 },
1784 [NDA_PROTOCOL] = { .type = NLA_U8 },
1785};
1786
1787static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1788 struct netlink_ext_ack *extack)
1789{
1790 struct net *net = sock_net(skb->sk);
1791 struct ndmsg *ndm;
1792 struct nlattr *dst_attr;
1793 struct neigh_table *tbl;
1794 struct neighbour *neigh;
1795 struct net_device *dev = NULL;
1796 int err = -EINVAL;
1797
1798 ASSERT_RTNL();
1799 if (nlmsg_len(nlh) < sizeof(*ndm))
1800 goto out;
1801
1802 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1803 if (!dst_attr) {
1804 NL_SET_ERR_MSG(extack, "Network address not specified");
1805 goto out;
1806 }
1807
1808 ndm = nlmsg_data(nlh);
1809 if (ndm->ndm_ifindex) {
1810 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1811 if (dev == NULL) {
1812 err = -ENODEV;
1813 goto out;
1814 }
1815 }
1816
1817 tbl = neigh_find_table(ndm->ndm_family);
1818 if (tbl == NULL)
1819 return -EAFNOSUPPORT;
1820
1821 if (nla_len(dst_attr) < (int)tbl->key_len) {
1822 NL_SET_ERR_MSG(extack, "Invalid network address");
1823 goto out;
1824 }
1825
1826 if (ndm->ndm_flags & NTF_PROXY) {
1827 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1828 goto out;
1829 }
1830
1831 if (dev == NULL)
1832 goto out;
1833
1834 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1835 if (neigh == NULL) {
1836 err = -ENOENT;
1837 goto out;
1838 }
1839
1840 err = __neigh_update(neigh, NULL, NUD_FAILED,
1841 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1842 NETLINK_CB(skb).portid, extack);
1843 write_lock_bh(&tbl->lock);
1844 neigh_release(neigh);
1845 neigh_remove_one(neigh, tbl);
1846 write_unlock_bh(&tbl->lock);
1847
1848out:
1849 return err;
1850}
1851
1852static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1853 struct netlink_ext_ack *extack)
1854{
1855 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1856 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1857 struct net *net = sock_net(skb->sk);
1858 struct ndmsg *ndm;
1859 struct nlattr *tb[NDA_MAX+1];
1860 struct neigh_table *tbl;
1861 struct net_device *dev = NULL;
1862 struct neighbour *neigh;
1863 void *dst, *lladdr;
1864 u8 protocol = 0;
1865 int err;
1866
1867 ASSERT_RTNL();
1868 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1869 nda_policy, extack);
1870 if (err < 0)
1871 goto out;
1872
1873 err = -EINVAL;
1874 if (!tb[NDA_DST]) {
1875 NL_SET_ERR_MSG(extack, "Network address not specified");
1876 goto out;
1877 }
1878
1879 ndm = nlmsg_data(nlh);
1880 if (ndm->ndm_ifindex) {
1881 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1882 if (dev == NULL) {
1883 err = -ENODEV;
1884 goto out;
1885 }
1886
1887 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1888 NL_SET_ERR_MSG(extack, "Invalid link address");
1889 goto out;
1890 }
1891 }
1892
1893 tbl = neigh_find_table(ndm->ndm_family);
1894 if (tbl == NULL)
1895 return -EAFNOSUPPORT;
1896
1897 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1898 NL_SET_ERR_MSG(extack, "Invalid network address");
1899 goto out;
1900 }
1901
1902 dst = nla_data(tb[NDA_DST]);
1903 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1904
1905 if (tb[NDA_PROTOCOL])
1906 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1907
1908 if (ndm->ndm_flags & NTF_PROXY) {
1909 struct pneigh_entry *pn;
1910
1911 err = -ENOBUFS;
1912 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1913 if (pn) {
1914 pn->flags = ndm->ndm_flags;
1915 if (protocol)
1916 pn->protocol = protocol;
1917 err = 0;
1918 }
1919 goto out;
1920 }
1921
1922 if (!dev) {
1923 NL_SET_ERR_MSG(extack, "Device not specified");
1924 goto out;
1925 }
1926
1927 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1928 err = -EINVAL;
1929 goto out;
1930 }
1931
1932 neigh = neigh_lookup(tbl, dst, dev);
1933 if (neigh == NULL) {
1934 bool exempt_from_gc;
1935
1936 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1937 err = -ENOENT;
1938 goto out;
1939 }
1940
1941 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1942 ndm->ndm_flags & NTF_EXT_LEARNED;
1943 neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
1944 if (IS_ERR(neigh)) {
1945 err = PTR_ERR(neigh);
1946 goto out;
1947 }
1948 } else {
1949 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1950 err = -EEXIST;
1951 neigh_release(neigh);
1952 goto out;
1953 }
1954
1955 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1956 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1957 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1958 }
1959
1960 if (ndm->ndm_flags & NTF_EXT_LEARNED)
1961 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1962
1963 if (ndm->ndm_flags & NTF_ROUTER)
1964 flags |= NEIGH_UPDATE_F_ISROUTER;
1965
1966 if (ndm->ndm_flags & NTF_USE) {
1967 neigh_event_send(neigh, NULL);
1968 err = 0;
1969 } else
1970 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1971 NETLINK_CB(skb).portid, extack);
1972
1973 if (protocol)
1974 neigh->protocol = protocol;
1975
1976 neigh_release(neigh);
1977
1978out:
1979 return err;
1980}
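/* Note (illustrative; the exact flags are chosen by the userspace client,
 * e.g. iproute2, not by this file): the nlmsg flags tested above are
 * expected to arrive roughly as
 *
 *	NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL	// "add": -EEXIST if present
 *	NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE	// "replace": create or override
 *	NLM_F_REQUEST | NLM_F_REPLACE			// "change": -ENOENT if missing
 */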
1981
1982static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1983{
1984 struct nlattr *nest;
1985
1986 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
1987 if (nest == NULL)
1988 return -ENOBUFS;
1989
1990 if ((parms->dev &&
1991 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1992 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1993 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1994 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1995 /* approximate value for the deprecated QUEUE_LEN (in packets) */
1996 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1997 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1998 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1999 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2000 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2001 NEIGH_VAR(parms, UCAST_PROBES)) ||
2002 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2003 NEIGH_VAR(parms, MCAST_PROBES)) ||
2004 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2005 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2006 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2007 NDTPA_PAD) ||
2008 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2009 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2010 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2011 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2012 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2013 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2014 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2015 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2016 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2017 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2018 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2019 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2020 nla_put_msecs(skb, NDTPA_LOCKTIME,
2021 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2022 goto nla_put_failure;
2023 return nla_nest_end(skb, nest);
2024
2025nla_put_failure:
2026 nla_nest_cancel(skb, nest);
2027 return -EMSGSIZE;
2028}
2029
2030static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2031 u32 pid, u32 seq, int type, int flags)
2032{
2033 struct nlmsghdr *nlh;
2034 struct ndtmsg *ndtmsg;
2035
2036 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2037 if (nlh == NULL)
2038 return -EMSGSIZE;
2039
2040 ndtmsg = nlmsg_data(nlh);
2041
2042 read_lock_bh(&tbl->lock);
2043 ndtmsg->ndtm_family = tbl->family;
2044 ndtmsg->ndtm_pad1 = 0;
2045 ndtmsg->ndtm_pad2 = 0;
2046
2047 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2048 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2049 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2050 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2051 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2052 goto nla_put_failure;
2053 {
2054 unsigned long now = jiffies;
2055 unsigned int flush_delta = now - tbl->last_flush;
2056 unsigned int rand_delta = now - tbl->last_rand;
2057 struct neigh_hash_table *nht;
2058 struct ndt_config ndc = {
2059 .ndtc_key_len = tbl->key_len,
2060 .ndtc_entry_size = tbl->entry_size,
2061 .ndtc_entries = atomic_read(&tbl->entries),
2062 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2063 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2064 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2065 };
2066
2067 rcu_read_lock_bh();
2068 nht = rcu_dereference_bh(tbl->nht);
2069 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2070 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2071 rcu_read_unlock_bh();
2072
2073 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2074 goto nla_put_failure;
2075 }
2076
2077 {
2078 int cpu;
2079 struct ndt_stats ndst;
2080
2081 memset(&ndst, 0, sizeof(ndst));
2082
2083 for_each_possible_cpu(cpu) {
2084 struct neigh_statistics *st;
2085
2086 st = per_cpu_ptr(tbl->stats, cpu);
2087 ndst.ndts_allocs += st->allocs;
2088 ndst.ndts_destroys += st->destroys;
2089 ndst.ndts_hash_grows += st->hash_grows;
2090 ndst.ndts_res_failed += st->res_failed;
2091 ndst.ndts_lookups += st->lookups;
2092 ndst.ndts_hits += st->hits;
2093 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2094 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2095 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2096 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
2097 ndst.ndts_table_fulls += st->table_fulls;
2098 }
2099
2100 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2101 NDTA_PAD))
2102 goto nla_put_failure;
2103 }
2104
2105 BUG_ON(tbl->parms.dev);
2106 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2107 goto nla_put_failure;
2108
2109 read_unlock_bh(&tbl->lock);
2110 nlmsg_end(skb, nlh);
2111 return 0;
2112
2113nla_put_failure:
2114 read_unlock_bh(&tbl->lock);
2115 nlmsg_cancel(skb, nlh);
2116 return -EMSGSIZE;
2117}
2118
2119static int neightbl_fill_param_info(struct sk_buff *skb,
2120 struct neigh_table *tbl,
2121 struct neigh_parms *parms,
2122 u32 pid, u32 seq, int type,
2123 unsigned int flags)
2124{
2125 struct ndtmsg *ndtmsg;
2126 struct nlmsghdr *nlh;
2127
2128 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2129 if (nlh == NULL)
2130 return -EMSGSIZE;
2131
2132 ndtmsg = nlmsg_data(nlh);
2133
2134 read_lock_bh(&tbl->lock);
2135 ndtmsg->ndtm_family = tbl->family;
2136 ndtmsg->ndtm_pad1 = 0;
2137 ndtmsg->ndtm_pad2 = 0;
2138
2139 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2140 neightbl_fill_parms(skb, parms) < 0)
2141 goto errout;
2142
2143 read_unlock_bh(&tbl->lock);
2144 nlmsg_end(skb, nlh);
2145 return 0;
2146errout:
2147 read_unlock_bh(&tbl->lock);
2148 nlmsg_cancel(skb, nlh);
2149 return -EMSGSIZE;
2150}
2151
2152static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2153 [NDTA_NAME] = { .type = NLA_STRING },
2154 [NDTA_THRESH1] = { .type = NLA_U32 },
2155 [NDTA_THRESH2] = { .type = NLA_U32 },
2156 [NDTA_THRESH3] = { .type = NLA_U32 },
2157 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2158 [NDTA_PARMS] = { .type = NLA_NESTED },
2159};
2160
2161static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2162 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2163 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2164 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2165 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2166 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2167 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2168 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2169 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2170 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2171 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2172 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2173 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2174 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2175 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2176};
2177
2178static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2179 struct netlink_ext_ack *extack)
2180{
2181 struct net *net = sock_net(skb->sk);
2182 struct neigh_table *tbl;
2183 struct ndtmsg *ndtmsg;
2184 struct nlattr *tb[NDTA_MAX+1];
2185 bool found = false;
2186 int err, tidx;
2187
2188 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2189 nl_neightbl_policy, extack);
2190 if (err < 0)
2191 goto errout;
2192
2193 if (tb[NDTA_NAME] == NULL) {
2194 err = -EINVAL;
2195 goto errout;
2196 }
2197
2198 ndtmsg = nlmsg_data(nlh);
2199
2200 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2201 tbl = neigh_tables[tidx];
2202 if (!tbl)
2203 continue;
2204 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2205 continue;
2206 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2207 found = true;
2208 break;
2209 }
2210 }
2211
2212 if (!found)
2213 return -ENOENT;
2214
2215 /*
2216 * We acquire tbl->lock to be nice to the periodic timers and
2217 * make sure they always see a consistent set of values.
2218 */
2219 write_lock_bh(&tbl->lock);
2220
2221 if (tb[NDTA_PARMS]) {
2222 struct nlattr *tbp[NDTPA_MAX+1];
2223 struct neigh_parms *p;
2224 int i, ifindex = 0;
2225
2226 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2227 tb[NDTA_PARMS],
2228 nl_ntbl_parm_policy, extack);
2229 if (err < 0)
2230 goto errout_tbl_lock;
2231
2232 if (tbp[NDTPA_IFINDEX])
2233 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2234
2235 p = lookup_neigh_parms(tbl, net, ifindex);
2236 if (p == NULL) {
2237 err = -ENOENT;
2238 goto errout_tbl_lock;
2239 }
2240
2241 for (i = 1; i <= NDTPA_MAX; i++) {
2242 if (tbp[i] == NULL)
2243 continue;
2244
2245 switch (i) {
2246 case NDTPA_QUEUE_LEN:
2247 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2248 nla_get_u32(tbp[i]) *
2249 SKB_TRUESIZE(ETH_FRAME_LEN));
2250 break;
2251 case NDTPA_QUEUE_LENBYTES:
2252 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2253 nla_get_u32(tbp[i]));
2254 break;
2255 case NDTPA_PROXY_QLEN:
2256 NEIGH_VAR_SET(p, PROXY_QLEN,
2257 nla_get_u32(tbp[i]));
2258 break;
2259 case NDTPA_APP_PROBES:
2260 NEIGH_VAR_SET(p, APP_PROBES,
2261 nla_get_u32(tbp[i]));
2262 break;
2263 case NDTPA_UCAST_PROBES:
2264 NEIGH_VAR_SET(p, UCAST_PROBES,
2265 nla_get_u32(tbp[i]));
2266 break;
2267 case NDTPA_MCAST_PROBES:
2268 NEIGH_VAR_SET(p, MCAST_PROBES,
2269 nla_get_u32(tbp[i]));
2270 break;
2271 case NDTPA_MCAST_REPROBES:
2272 NEIGH_VAR_SET(p, MCAST_REPROBES,
2273 nla_get_u32(tbp[i]));
2274 break;
2275 case NDTPA_BASE_REACHABLE_TIME:
2276 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2277 nla_get_msecs(tbp[i]));
2278 /* update reachable_time as well, otherwise, the change will
2279 * only be effective after the next time neigh_periodic_work
2280 * decides to recompute it (can be multiple minutes)
2281 */
2282 p->reachable_time =
2283 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2284 break;
2285 case NDTPA_GC_STALETIME:
2286 NEIGH_VAR_SET(p, GC_STALETIME,
2287 nla_get_msecs(tbp[i]));
2288 break;
2289 case NDTPA_DELAY_PROBE_TIME:
2290 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2291 nla_get_msecs(tbp[i]));
2292 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2293 break;
2294 case NDTPA_RETRANS_TIME:
2295 NEIGH_VAR_SET(p, RETRANS_TIME,
2296 nla_get_msecs(tbp[i]));
2297 break;
2298 case NDTPA_ANYCAST_DELAY:
2299 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2300 nla_get_msecs(tbp[i]));
2301 break;
2302 case NDTPA_PROXY_DELAY:
2303 NEIGH_VAR_SET(p, PROXY_DELAY,
2304 nla_get_msecs(tbp[i]));
2305 break;
2306 case NDTPA_LOCKTIME:
2307 NEIGH_VAR_SET(p, LOCKTIME,
2308 nla_get_msecs(tbp[i]));
2309 break;
2310 }
2311 }
2312 }
2313
2314 err = -ENOENT;
2315 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2316 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2317 !net_eq(net, &init_net))
2318 goto errout_tbl_lock;
2319
2320 if (tb[NDTA_THRESH1])
2321 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2322
2323 if (tb[NDTA_THRESH2])
2324 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2325
2326 if (tb[NDTA_THRESH3])
2327 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2328
2329 if (tb[NDTA_GC_INTERVAL])
2330 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2331
2332 err = 0;
2333
2334errout_tbl_lock:
2335 write_unlock_bh(&tbl->lock);
2336errout:
2337 return err;
2338}
2339
2340static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2341 struct netlink_ext_ack *extack)
2342{
2343 struct ndtmsg *ndtm;
2344
2345 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2346 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2347 return -EINVAL;
2348 }
2349
2350 ndtm = nlmsg_data(nlh);
2351 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2352 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2353 return -EINVAL;
2354 }
2355
2356 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2357 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2358 return -EINVAL;
2359 }
2360
2361 return 0;
2362}
2363
2364static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2365{
2366 const struct nlmsghdr *nlh = cb->nlh;
2367 struct net *net = sock_net(skb->sk);
2368 int family, tidx, nidx = 0;
2369 int tbl_skip = cb->args[0];
2370 int neigh_skip = cb->args[1];
2371 struct neigh_table *tbl;
2372
2373 if (cb->strict_check) {
2374 int err = neightbl_valid_dump_info(nlh, cb->extack);
2375
2376 if (err < 0)
2377 return err;
2378 }
2379
2380 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2381
2382 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2383 struct neigh_parms *p;
2384
2385 tbl = neigh_tables[tidx];
2386 if (!tbl)
2387 continue;
2388
2389 if (tidx < tbl_skip || (family && tbl->family != family))
2390 continue;
2391
2392 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2393 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2394 NLM_F_MULTI) < 0)
2395 break;
2396
2397 nidx = 0;
2398 p = list_next_entry(&tbl->parms, list);
2399 list_for_each_entry_from(p, &tbl->parms_list, list) {
2400 if (!net_eq(neigh_parms_net(p), net))
2401 continue;
2402
2403 if (nidx < neigh_skip)
2404 goto next;
2405
2406 if (neightbl_fill_param_info(skb, tbl, p,
2407 NETLINK_CB(cb->skb).portid,
2408 nlh->nlmsg_seq,
2409 RTM_NEWNEIGHTBL,
2410 NLM_F_MULTI) < 0)
2411 goto out;
2412 next:
2413 nidx++;
2414 }
2415
2416 neigh_skip = 0;
2417 }
2418out:
2419 cb->args[0] = tidx;
2420 cb->args[1] = nidx;
2421
2422 return skb->len;
2423}
2424
2425static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2426 u32 pid, u32 seq, int type, unsigned int flags)
2427{
2428 unsigned long now = jiffies;
2429 struct nda_cacheinfo ci;
2430 struct nlmsghdr *nlh;
2431 struct ndmsg *ndm;
2432
2433 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2434 if (nlh == NULL)
2435 return -EMSGSIZE;
2436
2437 ndm = nlmsg_data(nlh);
2438 ndm->ndm_family = neigh->ops->family;
2439 ndm->ndm_pad1 = 0;
2440 ndm->ndm_pad2 = 0;
2441 ndm->ndm_flags = neigh->flags;
2442 ndm->ndm_type = neigh->type;
2443 ndm->ndm_ifindex = neigh->dev->ifindex;
2444
2445 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2446 goto nla_put_failure;
2447
2448 read_lock_bh(&neigh->lock);
2449 ndm->ndm_state = neigh->nud_state;
2450 if (neigh->nud_state & NUD_VALID) {
2451 char haddr[MAX_ADDR_LEN];
2452
2453 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2454 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2455 read_unlock_bh(&neigh->lock);
2456 goto nla_put_failure;
2457 }
2458 }
2459
2460 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2461 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2462 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2463 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2464 read_unlock_bh(&neigh->lock);
2465
2466 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2467 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2468 goto nla_put_failure;
2469
2470 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2471 goto nla_put_failure;
2472
2473 nlmsg_end(skb, nlh);
2474 return 0;
2475
2476nla_put_failure:
2477 nlmsg_cancel(skb, nlh);
2478 return -EMSGSIZE;
2479}
2480
2481static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2482 u32 pid, u32 seq, int type, unsigned int flags,
2483 struct neigh_table *tbl)
2484{
2485 struct nlmsghdr *nlh;
2486 struct ndmsg *ndm;
2487
2488 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2489 if (nlh == NULL)
2490 return -EMSGSIZE;
2491
2492 ndm = nlmsg_data(nlh);
2493 ndm->ndm_family = tbl->family;
2494 ndm->ndm_pad1 = 0;
2495 ndm->ndm_pad2 = 0;
2496 ndm->ndm_flags = pn->flags | NTF_PROXY;
2497 ndm->ndm_type = RTN_UNICAST;
2498 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2499 ndm->ndm_state = NUD_NONE;
2500
2501 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2502 goto nla_put_failure;
2503
2504 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2505 goto nla_put_failure;
2506
2507 nlmsg_end(skb, nlh);
2508 return 0;
2509
2510nla_put_failure:
2511 nlmsg_cancel(skb, nlh);
2512 return -EMSGSIZE;
2513}
2514
2515static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2516{
2517 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2518 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2519}
2520
2521static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2522{
2523 struct net_device *master;
2524
2525 if (!master_idx)
2526 return false;
2527
2528 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2529 if (!master || master->ifindex != master_idx)
2530 return true;
2531
2532 return false;
2533}
2534
2535static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2536{
2537 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2538 return true;
2539
2540 return false;
2541}
2542
2543struct neigh_dump_filter {
2544 int master_idx;
2545 int dev_idx;
2546};
2547
2548static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2549 struct netlink_callback *cb,
2550 struct neigh_dump_filter *filter)
2551{
2552 struct net *net = sock_net(skb->sk);
2553 struct neighbour *n;
2554 int rc, h, s_h = cb->args[1];
2555 int idx, s_idx = idx = cb->args[2];
2556 struct neigh_hash_table *nht;
2557 unsigned int flags = NLM_F_MULTI;
2558
2559 if (filter->dev_idx || filter->master_idx)
2560 flags |= NLM_F_DUMP_FILTERED;
2561
2562 rcu_read_lock_bh();
2563 nht = rcu_dereference_bh(tbl->nht);
2564
2565 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2566 if (h > s_h)
2567 s_idx = 0;
2568 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2569 n != NULL;
2570 n = rcu_dereference_bh(n->next)) {
2571 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2572 goto next;
2573 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2574 neigh_master_filtered(n->dev, filter->master_idx))
2575 goto next;
2576 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2577 cb->nlh->nlmsg_seq,
2578 RTM_NEWNEIGH,
2579 flags) < 0) {
2580 rc = -1;
2581 goto out;
2582 }
2583next:
2584 idx++;
2585 }
2586 }
2587 rc = skb->len;
2588out:
2589 rcu_read_unlock_bh();
2590 cb->args[1] = h;
2591 cb->args[2] = idx;
2592 return rc;
2593}
2594
2595static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2596 struct netlink_callback *cb,
2597 struct neigh_dump_filter *filter)
2598{
2599 struct pneigh_entry *n;
2600 struct net *net = sock_net(skb->sk);
2601 int rc, h, s_h = cb->args[3];
2602 int idx, s_idx = idx = cb->args[4];
2603 unsigned int flags = NLM_F_MULTI;
2604
2605 if (filter->dev_idx || filter->master_idx)
2606 flags |= NLM_F_DUMP_FILTERED;
2607
2608 read_lock_bh(&tbl->lock);
2609
2610 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2611 if (h > s_h)
2612 s_idx = 0;
2613 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2614 if (idx < s_idx || pneigh_net(n) != net)
2615 goto next;
2616 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2617 neigh_master_filtered(n->dev, filter->master_idx))
2618 goto next;
2619 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2620 cb->nlh->nlmsg_seq,
2621 RTM_NEWNEIGH, flags, tbl) < 0) {
2622 read_unlock_bh(&tbl->lock);
2623 rc = -1;
2624 goto out;
2625 }
2626 next:
2627 idx++;
2628 }
2629 }
2630
2631 read_unlock_bh(&tbl->lock);
2632 rc = skb->len;
2633out:
2634 cb->args[3] = h;
2635 cb->args[4] = idx;
2636 return rc;
2637
2638}
2639
2640static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2641 bool strict_check,
2642 struct neigh_dump_filter *filter,
2643 struct netlink_ext_ack *extack)
2644{
2645 struct nlattr *tb[NDA_MAX + 1];
2646 int err, i;
2647
2648 if (strict_check) {
2649 struct ndmsg *ndm;
2650
2651 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2652 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2653 return -EINVAL;
2654 }
2655
2656 ndm = nlmsg_data(nlh);
2657 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2658 ndm->ndm_state || ndm->ndm_type) {
2659 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2660 return -EINVAL;
2661 }
2662
2663 if (ndm->ndm_flags & ~NTF_PROXY) {
2664 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2665 return -EINVAL;
2666 }
2667
2668 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2669 tb, NDA_MAX, nda_policy,
2670 extack);
2671 } else {
2672 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2673 NDA_MAX, nda_policy, extack);
2674 }
2675 if (err < 0)
2676 return err;
2677
2678 for (i = 0; i <= NDA_MAX; ++i) {
2679 if (!tb[i])
2680 continue;
2681
2682 /* all new attributes should require strict_check */
2683 switch (i) {
2684 case NDA_IFINDEX:
2685 filter->dev_idx = nla_get_u32(tb[i]);
2686 break;
2687 case NDA_MASTER:
2688 filter->master_idx = nla_get_u32(tb[i]);
2689 break;
2690 default:
2691 if (strict_check) {
2692 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2693 return -EINVAL;
2694 }
2695 }
2696 }
2697
2698 return 0;
2699}
2700
2701static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2702{
2703 const struct nlmsghdr *nlh = cb->nlh;
2704 struct neigh_dump_filter filter = {};
2705 struct neigh_table *tbl;
2706 int t, family, s_t;
2707 int proxy = 0;
2708 int err;
2709
2710 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2711
2712 /* check for the presence of a full ndmsg structure; the family member
2713 * is at the same offset in both structures
2714 */
2715 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2716 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2717 proxy = 1;
2718
2719 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2720 if (err < 0 && cb->strict_check)
2721 return err;
2722
2723 s_t = cb->args[0];
2724
2725 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2726 tbl = neigh_tables[t];
2727
2728 if (!tbl)
2729 continue;
2730 if (t < s_t || (family && tbl->family != family))
2731 continue;
2732 if (t > s_t)
2733 memset(&cb->args[1], 0, sizeof(cb->args) -
2734 sizeof(cb->args[0]));
2735 if (proxy)
2736 err = pneigh_dump_table(tbl, skb, cb, &filter);
2737 else
2738 err = neigh_dump_table(tbl, skb, cb, &filter);
2739 if (err < 0)
2740 break;
2741 }
2742
2743 cb->args[0] = t;
2744 return skb->len;
2745}
2746
2747static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2748 struct neigh_table **tbl,
2749 void **dst, int *dev_idx, u8 *ndm_flags,
2750 struct netlink_ext_ack *extack)
2751{
2752 struct nlattr *tb[NDA_MAX + 1];
2753 struct ndmsg *ndm;
2754 int err, i;
2755
2756 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2757 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2758 return -EINVAL;
2759 }
2760
2761 ndm = nlmsg_data(nlh);
2762 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2763 ndm->ndm_type) {
2764 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2765 return -EINVAL;
2766 }
2767
2768 if (ndm->ndm_flags & ~NTF_PROXY) {
2769 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2770 return -EINVAL;
2771 }
2772
2773 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2774 NDA_MAX, nda_policy, extack);
2775 if (err < 0)
2776 return err;
2777
2778 *ndm_flags = ndm->ndm_flags;
2779 *dev_idx = ndm->ndm_ifindex;
2780 *tbl = neigh_find_table(ndm->ndm_family);
2781 if (*tbl == NULL) {
2782 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2783 return -EAFNOSUPPORT;
2784 }
2785
2786 for (i = 0; i <= NDA_MAX; ++i) {
2787 if (!tb[i])
2788 continue;
2789
2790 switch (i) {
2791 case NDA_DST:
2792 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2793 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2794 return -EINVAL;
2795 }
2796 *dst = nla_data(tb[i]);
2797 break;
2798 default:
2799 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2800 return -EINVAL;
2801 }
2802 }
2803
2804 return 0;
2805}
2806
2807static inline size_t neigh_nlmsg_size(void)
2808{
2809 return NLMSG_ALIGN(sizeof(struct ndmsg))
2810 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2811 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2812 + nla_total_size(sizeof(struct nda_cacheinfo))
2813 + nla_total_size(4) /* NDA_PROBES */
2814 + nla_total_size(1); /* NDA_PROTOCOL */
2815}
2816
2817static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2818 u32 pid, u32 seq)
2819{
2820 struct sk_buff *skb;
2821 int err = 0;
2822
2823 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2824 if (!skb)
2825 return -ENOBUFS;
2826
2827 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2828 if (err) {
2829 kfree_skb(skb);
2830 goto errout;
2831 }
2832
2833 err = rtnl_unicast(skb, net, pid);
2834errout:
2835 return err;
2836}
2837
2838static inline size_t pneigh_nlmsg_size(void)
2839{
2840 return NLMSG_ALIGN(sizeof(struct ndmsg))
2841 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2842 + nla_total_size(1); /* NDA_PROTOCOL */
2843}
2844
2845static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2846 u32 pid, u32 seq, struct neigh_table *tbl)
2847{
2848 struct sk_buff *skb;
2849 int err = 0;
2850
2851 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2852 if (!skb)
2853 return -ENOBUFS;
2854
2855 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2856 if (err) {
2857 kfree_skb(skb);
2858 goto errout;
2859 }
2860
2861 err = rtnl_unicast(skb, net, pid);
2862errout:
2863 return err;
2864}
2865
2866static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2867 struct netlink_ext_ack *extack)
2868{
2869 struct net *net = sock_net(in_skb->sk);
2870 struct net_device *dev = NULL;
2871 struct neigh_table *tbl = NULL;
2872 struct neighbour *neigh;
2873 void *dst = NULL;
2874 u8 ndm_flags = 0;
2875 int dev_idx = 0;
2876 int err;
2877
2878 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2879 extack);
2880 if (err < 0)
2881 return err;
2882
2883 if (dev_idx) {
2884 dev = __dev_get_by_index(net, dev_idx);
2885 if (!dev) {
2886 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2887 return -ENODEV;
2888 }
2889 }
2890
2891 if (!dst) {
2892 NL_SET_ERR_MSG(extack, "Network address not specified");
2893 return -EINVAL;
2894 }
2895
2896 if (ndm_flags & NTF_PROXY) {
2897 struct pneigh_entry *pn;
2898
2899 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2900 if (!pn) {
2901 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2902 return -ENOENT;
2903 }
2904 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2905 nlh->nlmsg_seq, tbl);
2906 }
2907
2908 if (!dev) {
2909 NL_SET_ERR_MSG(extack, "No device specified");
2910 return -EINVAL;
2911 }
2912
2913 neigh = neigh_lookup(tbl, dst, dev);
2914 if (!neigh) {
2915 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2916 return -ENOENT;
2917 }
2918
2919 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2920 nlh->nlmsg_seq);
2921
2922 neigh_release(neigh);
2923
2924 return err;
2925}
2926
2927void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2928{
2929 int chain;
2930 struct neigh_hash_table *nht;
2931
2932 rcu_read_lock_bh();
2933 nht = rcu_dereference_bh(tbl->nht);
2934
2935 read_lock(&tbl->lock); /* avoid resizes */
2936 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2937 struct neighbour *n;
2938
2939 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2940 n != NULL;
2941 n = rcu_dereference_bh(n->next))
2942 cb(n, cookie);
2943 }
2944 read_unlock(&tbl->lock);
2945 rcu_read_unlock_bh();
2946}
2947EXPORT_SYMBOL(neigh_for_each);
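/* Example (minimal usage sketch): the callback runs under the table read
 * lock with BHs disabled, so it must stay simple (see the locking notes at
 * the top of this file).
 *
 *	struct dev_count { struct net_device *dev; int cnt; };
 *
 *	static void count_on_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct dev_count *c = cookie;
 *
 *		if (n->dev == c->dev)
 *			c->cnt++;
 *	}
 *	...
 *	neigh_for_each(&arp_tbl, count_on_dev, &counter);
 */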
2948
2949/* The tbl->lock must be held as a writer and BH disabled. */
2950void __neigh_for_each_release(struct neigh_table *tbl,
2951 int (*cb)(struct neighbour *))
2952{
2953 int chain;
2954 struct neigh_hash_table *nht;
2955
2956 nht = rcu_dereference_protected(tbl->nht,
2957 lockdep_is_held(&tbl->lock));
2958 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2959 struct neighbour *n;
2960 struct neighbour __rcu **np;
2961
2962 np = &nht->hash_buckets[chain];
2963 while ((n = rcu_dereference_protected(*np,
2964 lockdep_is_held(&tbl->lock))) != NULL) {
2965 int release;
2966
2967 write_lock(&n->lock);
2968 release = cb(n);
2969 if (release) {
2970 rcu_assign_pointer(*np,
2971 rcu_dereference_protected(n->next,
2972 lockdep_is_held(&tbl->lock)));
2973 neigh_mark_dead(n);
2974 } else
2975 np = &n->next;
2976 write_unlock(&n->lock);
2977 if (release)
2978 neigh_cleanup_and_release(n);
2979 }
2980 }
2981}
2982EXPORT_SYMBOL(__neigh_for_each_release);
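/* Example (illustrative sketch, in the style of the neigh_ifdown() users):
 * return non-zero from the callback to unlink the entry; the core then
 * calls neigh_cleanup_and_release() on it.  target_dev is a placeholder
 * for however the caller passes the device (there is no cookie argument).
 *
 *	static int flush_target_dev(struct neighbour *n)
 *	{
 *		return n->dev == target_dev;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, flush_target_dev);
 *	write_unlock_bh(&tbl->lock);
 */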
2983
2984int neigh_xmit(int index, struct net_device *dev,
2985 const void *addr, struct sk_buff *skb)
2986{
2987 int err = -EAFNOSUPPORT;
2988 if (likely(index < NEIGH_NR_TABLES)) {
2989 struct neigh_table *tbl;
2990 struct neighbour *neigh;
2991
2992 tbl = neigh_tables[index];
2993 if (!tbl)
2994 goto out;
2995 rcu_read_lock_bh();
2996 if (index == NEIGH_ARP_TABLE) {
2997 u32 key = *((u32 *)addr);
2998
2999 neigh = __ipv4_neigh_lookup_noref(dev, key);
3000 } else {
3001 neigh = __neigh_lookup_noref(tbl, addr, dev);
3002 }
3003 if (!neigh)
3004 neigh = __neigh_create(tbl, addr, dev, false);
3005 err = PTR_ERR(neigh);
3006 if (IS_ERR(neigh)) {
3007 rcu_read_unlock_bh();
3008 goto out_kfree_skb;
3009 }
3010 err = neigh->output(neigh, skb);
3011 rcu_read_unlock_bh();
3012 }
3013 else if (index == NEIGH_LINK_TABLE) {
3014 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3015 addr, NULL, skb->len);
3016 if (err < 0)
3017 goto out_kfree_skb;
3018 err = dev_queue_xmit(skb);
3019 }
3020out:
3021 return err;
3022out_kfree_skb:
3023 kfree_skb(skb);
3024 goto out;
3025}
3026EXPORT_SYMBOL(neigh_xmit);
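/* Example (illustrative calls; nexthop_be32 and dest_mac are placeholders):
 * a caller that only has a next-hop address and a device, roughly the way
 * MPLS uses this helper.  The table index selects how the address is
 * interpreted; NEIGH_LINK_TABLE transmits to a literal link-layer address.
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &nexthop_be32, skb);
 *	err = neigh_xmit(NEIGH_LINK_TABLE, dev, dest_mac, skb);
 */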
3027
3028#ifdef CONFIG_PROC_FS
3029
3030static struct neighbour *neigh_get_first(struct seq_file *seq)
3031{
3032 struct neigh_seq_state *state = seq->private;
3033 struct net *net = seq_file_net(seq);
3034 struct neigh_hash_table *nht = state->nht;
3035 struct neighbour *n = NULL;
3036 int bucket;
3037
3038 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3039 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3040 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3041
3042 while (n) {
3043 if (!net_eq(dev_net(n->dev), net))
3044 goto next;
3045 if (state->neigh_sub_iter) {
3046 loff_t fakep = 0;
3047 void *v;
3048
3049 v = state->neigh_sub_iter(state, n, &fakep);
3050 if (!v)
3051 goto next;
3052 }
3053 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3054 break;
3055 if (n->nud_state & ~NUD_NOARP)
3056 break;
3057next:
3058 n = rcu_dereference_bh(n->next);
3059 }
3060
3061 if (n)
3062 break;
3063 }
3064 state->bucket = bucket;
3065
3066 return n;
3067}
3068
3069static struct neighbour *neigh_get_next(struct seq_file *seq,
3070 struct neighbour *n,
3071 loff_t *pos)
3072{
3073 struct neigh_seq_state *state = seq->private;
3074 struct net *net = seq_file_net(seq);
3075 struct neigh_hash_table *nht = state->nht;
3076
3077 if (state->neigh_sub_iter) {
3078 void *v = state->neigh_sub_iter(state, n, pos);
3079 if (v)
3080 return n;
3081 }
3082 n = rcu_dereference_bh(n->next);
3083
3084 while (1) {
3085 while (n) {
3086 if (!net_eq(dev_net(n->dev), net))
3087 goto next;
3088 if (state->neigh_sub_iter) {
3089 void *v = state->neigh_sub_iter(state, n, pos);
3090 if (v)
3091 return n;
3092 goto next;
3093 }
3094 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3095 break;
3096
3097 if (n->nud_state & ~NUD_NOARP)
3098 break;
3099next:
3100 n = rcu_dereference_bh(n->next);
3101 }
3102
3103 if (n)
3104 break;
3105
3106 if (++state->bucket >= (1 << nht->hash_shift))
3107 break;
3108
3109 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3110 }
3111
3112 if (n && pos)
3113 --(*pos);
3114 return n;
3115}
3116
3117static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3118{
3119 struct neighbour *n = neigh_get_first(seq);
3120
3121 if (n) {
3122 --(*pos);
3123 while (*pos) {
3124 n = neigh_get_next(seq, n, pos);
3125 if (!n)
3126 break;
3127 }
3128 }
3129 return *pos ? NULL : n;
3130}
3131
3132static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3133{
3134 struct neigh_seq_state *state = seq->private;
3135 struct net *net = seq_file_net(seq);
3136 struct neigh_table *tbl = state->tbl;
3137 struct pneigh_entry *pn = NULL;
3138 int bucket = state->bucket;
3139
3140 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3141 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3142 pn = tbl->phash_buckets[bucket];
3143 while (pn && !net_eq(pneigh_net(pn), net))
3144 pn = pn->next;
3145 if (pn)
3146 break;
3147 }
3148 state->bucket = bucket;
3149
3150 return pn;
3151}
3152
3153static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3154 struct pneigh_entry *pn,
3155 loff_t *pos)
3156{
3157 struct neigh_seq_state *state = seq->private;
3158 struct net *net = seq_file_net(seq);
3159 struct neigh_table *tbl = state->tbl;
3160
3161 do {
3162 pn = pn->next;
3163 } while (pn && !net_eq(pneigh_net(pn), net));
3164
3165 while (!pn) {
3166 if (++state->bucket > PNEIGH_HASHMASK)
3167 break;
3168 pn = tbl->phash_buckets[state->bucket];
3169 while (pn && !net_eq(pneigh_net(pn), net))
3170 pn = pn->next;
3171 if (pn)
3172 break;
3173 }
3174
3175 if (pn && pos)
3176 --(*pos);
3177
3178 return pn;
3179}
3180
3181static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3182{
3183 struct pneigh_entry *pn = pneigh_get_first(seq);
3184
3185 if (pn) {
3186 --(*pos);
3187 while (*pos) {
3188 pn = pneigh_get_next(seq, pn, pos);
3189 if (!pn)
3190 break;
3191 }
3192 }
3193 return *pos ? NULL : pn;
3194}
3195
3196static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3197{
3198 struct neigh_seq_state *state = seq->private;
3199 void *rc;
3200 loff_t idxpos = *pos;
3201
3202 rc = neigh_get_idx(seq, &idxpos);
3203 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3204 rc = pneigh_get_idx(seq, &idxpos);
3205
3206 return rc;
3207}
3208
3209void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3210 __acquires(tbl->lock)
3211 __acquires(rcu_bh)
3212{
3213 struct neigh_seq_state *state = seq->private;
3214
3215 state->tbl = tbl;
3216 state->bucket = 0;
3217 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3218
3219 rcu_read_lock_bh();
3220 state->nht = rcu_dereference_bh(tbl->nht);
3221 read_lock(&tbl->lock);
3222
3223 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3224}
3225EXPORT_SYMBOL(neigh_seq_start);
3226
3227void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3228{
3229 struct neigh_seq_state *state;
3230 void *rc;
3231
3232 if (v == SEQ_START_TOKEN) {
3233 rc = neigh_get_first(seq);
3234 goto out;
3235 }
3236
3237 state = seq->private;
3238 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3239 rc = neigh_get_next(seq, v, NULL);
3240 if (rc)
3241 goto out;
3242 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3243 rc = pneigh_get_first(seq);
3244 } else {
3245 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3246 rc = pneigh_get_next(seq, v, NULL);
3247 }
3248out:
3249 ++(*pos);
3250 return rc;
3251}
3252EXPORT_SYMBOL(neigh_seq_next);
3253
3254void neigh_seq_stop(struct seq_file *seq, void *v)
3255 __releases(tbl->lock)
3256 __releases(rcu_bh)
3257{
3258 struct neigh_seq_state *state = seq->private;
3259 struct neigh_table *tbl = state->tbl;
3260
3261 read_unlock(&tbl->lock);
3262 rcu_read_unlock_bh();
3263}
3264EXPORT_SYMBOL(neigh_seq_stop);
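
/* A protocol typically wires these helpers straight into its own
 * seq_operations.  The sketch below is illustrative only; the foo_*
 * names are placeholders, not taken from any particular protocol:
 *
 *	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &foo_tbl,
 *				       NEIGH_SEQ_NEIGH_ONLY);
 *	}
 *
 *	static const struct seq_operations foo_seq_ops = {
 *		.start	= foo_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= foo_seq_show,
 *	};
 */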
3265
3266/* statistics via seq_file */
3267
3268static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3269{
3270 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3271 int cpu;
3272
3273 if (*pos == 0)
3274 return SEQ_START_TOKEN;
3275
3276 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3277 if (!cpu_possible(cpu))
3278 continue;
3279 *pos = cpu+1;
3280 return per_cpu_ptr(tbl->stats, cpu);
3281 }
3282 return NULL;
3283}
3284
3285static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3286{
3287 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3288 int cpu;
3289
3290 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3291 if (!cpu_possible(cpu))
3292 continue;
3293 *pos = cpu+1;
3294 return per_cpu_ptr(tbl->stats, cpu);
3295 }
3296 return NULL;
3297}
3298
3299static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3300{
3301	/* Nothing to release: the stat iterators take no locks. */
3302}
3303
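/* Emit one header line followed by one row per possible CPU.  The
 * first column, "entries", is the table-wide entry count and is simply
 * repeated on every row; the remaining columns come from the per-CPU
 * statistics.  The file is typically exposed under /proc/net/stat/
 * (e.g. arp_cache or ndisc_cache).
 */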
3304static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3305{
3306 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3307 struct neigh_statistics *st = v;
3308
3309 if (v == SEQ_START_TOKEN) {
3310 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3311 return 0;
3312 }
3313
3314 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3315 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
3316 atomic_read(&tbl->entries),
3317
3318 st->allocs,
3319 st->destroys,
3320 st->hash_grows,
3321
3322 st->lookups,
3323 st->hits,
3324
3325 st->res_failed,
3326
3327 st->rcv_probes_mcast,
3328 st->rcv_probes_ucast,
3329
3330 st->periodic_gc_runs,
3331 st->forced_gc_runs,
3332 st->unres_discards,
3333 st->table_fulls
3334 );
3335
3336 return 0;
3337}
3338
3339static const struct seq_operations neigh_stat_seq_ops = {
3340 .start = neigh_stat_seq_start,
3341 .next = neigh_stat_seq_next,
3342 .stop = neigh_stat_seq_stop,
3343 .show = neigh_stat_seq_show,
3344};
3345#endif /* CONFIG_PROC_FS */
3346
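/* Build an rtnetlink neighbour message of the given type (e.g.
 * RTM_NEWNEIGH or RTM_DELNEIGH) and multicast it to RTNLGRP_NEIGH
 * listeners.  Allocation or fill failures are reported to interested
 * sockets via rtnl_set_sk_err().
 */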
3347static void __neigh_notify(struct neighbour *n, int type, int flags,
3348 u32 pid)
3349{
3350 struct net *net = dev_net(n->dev);
3351 struct sk_buff *skb;
3352 int err = -ENOBUFS;
3353
3354 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3355 if (skb == NULL)
3356 goto errout;
3357
3358 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3359 if (err < 0) {
3360 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3361 WARN_ON(err == -EMSGSIZE);
3362 kfree_skb(skb);
3363 goto errout;
3364 }
3365 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3366 return;
3367errout:
3368 if (err < 0)
3369 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3370}
3371
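/* Ask user space to resolve this neighbour: an RTM_GETNEIGH request is
 * multicast to RTNLGRP_NEIGH so that an application-level resolver
 * (used when app_solicit is non-zero) can answer with a neighbour
 * update.
 */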
3372void neigh_app_ns(struct neighbour *n)
3373{
3374 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3375}
3376EXPORT_SYMBOL(neigh_app_ns);
3377
3378#ifdef CONFIG_SYSCTL
3379static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3380
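/* The legacy "unres_qlen" sysctl is expressed in packets, while the
 * value actually stored (QUEUE_LEN_BYTES) is in bytes, so reads and
 * writes are converted with SKB_TRUESIZE(ETH_FRAME_LEN).  For example,
 * writing 3 stores 3 * SKB_TRUESIZE(ETH_FRAME_LEN) bytes, and a byte
 * value that is not an exact multiple reads back rounded down.
 */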
3381static int proc_unres_qlen(struct ctl_table *ctl, int write,
3382 void __user *buffer, size_t *lenp, loff_t *ppos)
3383{
3384 int size, ret;
3385 struct ctl_table tmp = *ctl;
3386
3387 tmp.extra1 = SYSCTL_ZERO;
3388 tmp.extra2 = &unres_qlen_max;
3389 tmp.data = &size;
3390
3391 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3392 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3393
3394 if (write && !ret)
3395 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3396 return ret;
3397}
3398
3399static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3400 int family)
3401{
3402 switch (family) {
3403 case AF_INET:
3404 return __in_dev_arp_parms_get_rcu(dev);
3405 case AF_INET6:
3406 return __in6_dev_nd_parms_get_rcu(dev);
3407 }
3408 return NULL;
3409}
3410
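/* Propagate a changed "default" value to every device in the namespace
 * whose per-device parms have not explicitly overridden that entry
 * (tracked by the data_state bitmap).
 */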
3411static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3412 int index)
3413{
3414 struct net_device *dev;
3415 int family = neigh_parms_family(p);
3416
3417 rcu_read_lock();
3418 for_each_netdev_rcu(net, dev) {
3419 struct neigh_parms *dst_p =
3420 neigh_get_dev_parms_rcu(dev, family);
3421
3422 if (dst_p && !test_bit(index, dst_p->data_state))
3423 dst_p->data[index] = p->data[index];
3424 }
3425 rcu_read_unlock();
3426}
3427
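/* Bookkeeping shared by all the sysctl handlers below: on a write,
 * mark the entry as explicitly configured, notify netevent listeners
 * when delay_first_probe_time changes, and, when no device is attached
 * (ctl->extra1 == NULL, i.e. the "default" entries), push the new
 * value to all devices.
 */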
3428static void neigh_proc_update(struct ctl_table *ctl, int write)
3429{
3430 struct net_device *dev = ctl->extra1;
3431 struct neigh_parms *p = ctl->extra2;
3432 struct net *net = neigh_parms_net(p);
3433 int index = (int *) ctl->data - p->data;
3434
3435 if (!write)
3436 return;
3437
3438 set_bit(index, p->data_state);
3439 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3440 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3441	if (!dev) /* a NULL dev means these are the default values */
3442 neigh_copy_dflt_parms(net, p, index);
3443}
3444
3445static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3446 void __user *buffer,
3447 size_t *lenp, loff_t *ppos)
3448{
3449 struct ctl_table tmp = *ctl;
3450 int ret;
3451
3452 tmp.extra1 = SYSCTL_ZERO;
3453 tmp.extra2 = SYSCTL_INT_MAX;
3454
3455 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3456 neigh_proc_update(ctl, write);
3457 return ret;
3458}
3459
3460int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3461 void __user *buffer, size_t *lenp, loff_t *ppos)
3462{
3463 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3464
3465 neigh_proc_update(ctl, write);
3466 return ret;
3467}
3468EXPORT_SYMBOL(neigh_proc_dointvec);
3469
3470int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3471 void __user *buffer,
3472 size_t *lenp, loff_t *ppos)
3473{
3474 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3475
3476 neigh_proc_update(ctl, write);
3477 return ret;
3478}
3479EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3480
3481static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3482 void __user *buffer,
3483 size_t *lenp, loff_t *ppos)
3484{
3485 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3486
3487 neigh_proc_update(ctl, write);
3488 return ret;
3489}
3490
3491int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3492 void __user *buffer,
3493 size_t *lenp, loff_t *ppos)
3494{
3495 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3496
3497 neigh_proc_update(ctl, write);
3498 return ret;
3499}
3500EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3501
3502static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3503 void __user *buffer,
3504 size_t *lenp, loff_t *ppos)
3505{
3506 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3507
3508 neigh_proc_update(ctl, write);
3509 return ret;
3510}
3511
3512static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3513 void __user *buffer,
3514 size_t *lenp, loff_t *ppos)
3515{
3516 struct neigh_parms *p = ctl->extra2;
3517 int ret;
3518
3519 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3520 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3521 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3522 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3523 else
3524 ret = -1;
3525
3526 if (write && ret == 0) {
3527		/* Update reachable_time as well; otherwise the change only
3528		 * takes effect the next time neigh_periodic_work decides to
3529		 * recompute it.
3530		 */
3531 p->reachable_time =
3532 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3533 }
3534 return ret;
3535}
3536
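/* The template table below cannot know the address of the neigh_parms
 * it will eventually describe, so each entry stores the offset of
 * data[index] computed against a NULL pointer.  neigh_sysctl_register()
 * later adds the real parms address to turn that offset into a usable
 * pointer.
 */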
3537#define NEIGH_PARMS_DATA_OFFSET(index) \
3538 (&((struct neigh_parms *) 0)->data[index])
3539
3540#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3541 [NEIGH_VAR_ ## attr] = { \
3542 .procname = name, \
3543 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3544 .maxlen = sizeof(int), \
3545 .mode = mval, \
3546 .proc_handler = proc, \
3547 }
3548
3549#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3550 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3551
3552#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3553 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3554
3555#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3556 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3557
3558#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3559 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3560
3561#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3562 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3563
3564#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3565 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3566
3567static struct neigh_sysctl_table {
3568 struct ctl_table_header *sysctl_header;
3569 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3570} neigh_sysctl_template __read_mostly = {
3571 .neigh_vars = {
3572 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3573 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3574 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3575 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3576 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3577 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3578 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3579 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3580 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3581 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3582 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3583 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3584 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3585 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3586 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3587 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3588 [NEIGH_VAR_GC_INTERVAL] = {
3589 .procname = "gc_interval",
3590 .maxlen = sizeof(int),
3591 .mode = 0644,
3592 .proc_handler = proc_dointvec_jiffies,
3593 },
3594 [NEIGH_VAR_GC_THRESH1] = {
3595 .procname = "gc_thresh1",
3596 .maxlen = sizeof(int),
3597 .mode = 0644,
3598 .extra1 = SYSCTL_ZERO,
3599 .extra2 = SYSCTL_INT_MAX,
3600 .proc_handler = proc_dointvec_minmax,
3601 },
3602 [NEIGH_VAR_GC_THRESH2] = {
3603 .procname = "gc_thresh2",
3604 .maxlen = sizeof(int),
3605 .mode = 0644,
3606 .extra1 = SYSCTL_ZERO,
3607 .extra2 = SYSCTL_INT_MAX,
3608 .proc_handler = proc_dointvec_minmax,
3609 },
3610 [NEIGH_VAR_GC_THRESH3] = {
3611 .procname = "gc_thresh3",
3612 .maxlen = sizeof(int),
3613 .mode = 0644,
3614 .extra1 = SYSCTL_ZERO,
3615 .extra2 = SYSCTL_INT_MAX,
3616 .proc_handler = proc_dointvec_minmax,
3617 },
3618 {},
3619 },
3620};
3621
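/* Clone the template above, bind its entries to @p, and register the
 * result under "net/<ipv4|ipv6>/neigh/<ifname|default>".  Per-device
 * registrations truncate the table before the gc_* knobs, which only
 * exist for "default".  A non-NULL @handler overrides the four time
 * related entries; otherwise base_reachable_time(_ms) writes also
 * refresh p->reachable_time via neigh_proc_base_reachable_time().
 *
 * A caller would typically invoke it along these lines when a device
 * comes up (illustrative sketch, not quoted from any protocol):
 *
 *	err = neigh_sysctl_register(dev, dev_parms, NULL);
 *	if (err)
 *		goto out_free_parms;
 */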
3622int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3623 proc_handler *handler)
3624{
3625 int i;
3626 struct neigh_sysctl_table *t;
3627 const char *dev_name_source;
3628 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3629 char *p_name;
3630
3631 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3632 if (!t)
3633 goto err;
3634
3635 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3636 t->neigh_vars[i].data += (long) p;
3637 t->neigh_vars[i].extra1 = dev;
3638 t->neigh_vars[i].extra2 = p;
3639 }
3640
3641 if (dev) {
3642 dev_name_source = dev->name;
3643 /* Terminate the table early */
3644 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3645 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3646 } else {
3647 struct neigh_table *tbl = p->tbl;
3648 dev_name_source = "default";
3649 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3650 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3651 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3652 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3653 }
3654
3655 if (handler) {
3656 /* RetransTime */
3657 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3658 /* ReachableTime */
3659 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3660 /* RetransTime (in milliseconds)*/
3661 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3662 /* ReachableTime (in milliseconds) */
3663 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3664 } else {
3665		/* These handlers also update p->reachable_time after
3666		 * base_reachable_time(_ms) is written, so the new interval takes
3667		 * effect on the next neighbour update instead of waiting for
3668		 * neigh_periodic_work to recompute it (which can take minutes).
3669		 * Any handler that replaces them should do the same.
3670		 */
3671 /* ReachableTime */
3672 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3673 neigh_proc_base_reachable_time;
3674 /* ReachableTime (in milliseconds) */
3675 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3676 neigh_proc_base_reachable_time;
3677 }
3678
3679 /* Don't export sysctls to unprivileged users */
3680 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3681 t->neigh_vars[0].procname = NULL;
3682
3683 switch (neigh_parms_family(p)) {
3684 case AF_INET:
3685 p_name = "ipv4";
3686 break;
3687 case AF_INET6:
3688 p_name = "ipv6";
3689 break;
3690 default:
3691 BUG();
3692 }
3693
3694 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3695 p_name, dev_name_source);
3696 t->sysctl_header =
3697 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3698 if (!t->sysctl_header)
3699 goto free;
3700
3701 p->sysctl_table = t;
3702 return 0;
3703
3704free:
3705 kfree(t);
3706err:
3707 return -ENOBUFS;
3708}
3709EXPORT_SYMBOL(neigh_sysctl_register);
3710
3711void neigh_sysctl_unregister(struct neigh_parms *p)
3712{
3713 if (p->sysctl_table) {
3714 struct neigh_sysctl_table *t = p->sysctl_table;
3715 p->sysctl_table = NULL;
3716 unregister_net_sysctl_table(t->sysctl_header);
3717 kfree(t);
3718 }
3719}
3720EXPORT_SYMBOL(neigh_sysctl_unregister);
3721
3722#endif /* CONFIG_SYSCTL */
3723
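/* Register the PF_UNSPEC rtnetlink handlers for neighbour and
 * neighbour-table messages; subsys_initcall() runs this early during
 * boot, before the address families register their tables.
 */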
3724static int __init neigh_init(void)
3725{
3726 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3727 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3728 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3729
3730 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3731 0);
3732 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3733
3734 return 0;
3735}
3736
3737subsys_initcall(neigh_init);