// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

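/*
 * Routing state kept by this file: nr_node_list holds the known
 * destination nodes, each with up to three routes ordered by quality,
 * and nr_neigh_list holds the AX.25 neighbours those routes point at.
 * Each list is protected by its own spinlock and every entry is
 * reference counted; nr_neigh_no hands out the neighbour numbers shown
 * in the proc output below.
 */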
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}
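
/*
 * Both lookup helpers above return their entry with an extra reference
 * held; callers drop it again with nr_node_put() or nr_neigh_put() once
 * they are done with the entry.
 */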

static void nr_remove_neigh(struct nr_neigh *);

/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
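/*
 * Each node keeps at most three routes, best quality first, with 'which'
 * indexing the route currently in use.  For example, if a node already
 * has three routes and a report arrives whose quality beats routes[2],
 * the worst route is dropped, the newcomer takes its slot and the array
 * is re-sorted by quality below.
 */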
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25 = NULL;
		nr_neigh->dev = dev;
		nr_neigh->quality = sysctl_netrom_default_path_quality;
		nr_neigh->locked = 0;
		nr_neigh->count = 0;
		nr_neigh->number = nr_neigh_no++;
		nr_neigh->failed = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_KERNEL);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
		break;
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * "Delete" a node. Strictly speaking, remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					fallthrough;
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					fallthrough;
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25 = NULL;
	nr_neigh->dev = dev;
	nr_neigh->quality = quality;
	nr_neigh->locked = 1;
	nr_neigh->count = 0;
	nr_neigh->number = nr_neigh_no++;
	nr_neigh->failed = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL)
		return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via it.
 */
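/*
 * An obs_count of zero marks a locked route, which is treated as
 * permanent here; any other value is counted down and the route is
 * removed once it reaches zero.
 */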
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
					break;
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;

			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
							break;
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the device given is a valid AX.25 interface that is "up",
 * or a valid Ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
				  ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i] = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

/*
 * Handle the ioctls that control the routing functions.
 */
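/*
 * Rough sketch of how user space typically drives this, issued on an
 * AF_NETROM socket (illustrative only; the callsign variables and the
 * quality/obs_count values below are made up):
 *
 *	struct nr_route_struct nr_route = {
 *		.type      = NETROM_NODE,
 *		.callsign  = dest_call,		// destination node
 *		.mnemonic  = "NODE",
 *		.neighbour = neigh_call,	// AX.25 neighbour to go via
 *		.quality   = 192,
 *		.obs_count = 6,
 *		.ndigis    = 0,
 *	};
 *	strcpy(nr_route.device, "ax0");
 *	ioctl(fd, SIOCADDRT, &nr_route);
 *
 * SIOCDELRT takes the same structure; SIOCNRDECOBS needs no argument.
 */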
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
					  nr_route.mnemonic,
					  &nr_route.neighbour,
					  nr_call_to_digi(&digi, nr_route.ndigis,
							  nr_route.digipeaters),
					  dev, nr_route.quality,
					  nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
					   nr_call_to_digi(&digi, nr_route.ndigis,
							   nr_route.digipeaters),
					   dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
					  &nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
					   dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * A level 2 link has timed out, so it appears to be a poor link; don't
 * use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
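/*
 * The NET/ROM network header, as parsed below: bytes 0-6 carry the
 * originating node's callsign, bytes 7-13 the destination node's
 * callsign, and byte 14 the time-to-live that is decremented at each
 * hop.
 */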
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the NET/ROM headers, so we need our own
	   skb; we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

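/*
 * seq_file plumbing for the node and neighbour tables NET/ROM exposes
 * under /proc/net (registered elsewhere; as "nr_nodes" and "nr_neigh"
 * in mainline).
 */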
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			   ax2asc(buf, &nr_node->callsign),
			   (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			   nr_node->which + 1,
			   nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				   nr_node->routes[i].quality,
				   nr_node->routes[i].obs_count,
				   nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_neigh_list_lock)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
	__releases(&nr_neigh_list_lock)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			   nr_neigh->number,
			   ax2asc(buf, &nr_neigh->callsign),
			   nr_neigh->dev ? nr_neigh->dev->name : "???",
			   nr_neigh->quality,
			   nr_neigh->locked,
			   nr_neigh->count,
			   nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};
#endif

/*
 * Free all memory associated with the nodes and routes lists.
 */
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}