// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

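/*
 *	Each neighbour is assigned a small id from this counter; it appears
 *	as the "addr" column in the neighbour listing below.
 */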
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

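/*
 *	Look up a node by callsign and take a reference on it; returns NULL
 *	if the node is unknown. The caller drops the reference with
 *	nr_node_put().
 */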
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

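/*
 *	As above, but look up a neighbour by callsign and device pair; the
 *	caller releases the reference with nr_neigh_put().
 */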
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/*
 *	Re-sort routes x and y of a node into quality order, keeping
 *	nr_node->which pointing at the same route as before.
 */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}

/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if they are new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev=nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was temporary trouble with the link and reset the
	 * routes now (rather than waiting for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_KERNEL);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
		break;
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static void nr_remove_node_locked(struct nr_node *nr_node)
{
	lockdep_assert_held(&nr_node_list_lock);

	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

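/*
 *	Unlink a neighbour from the list and drop the list's reference.
 *	Callers must hold nr_neigh_list_lock; nr_remove_neigh() below takes
 *	the lock itself.
 */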
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 *	"Delete" a node. Strictly speaking, this removes one route to the
 *	node; the node itself is only deleted once no routes to it are left.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	spin_lock_bh(&nr_node_list_lock);
	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node_locked(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					fallthrough;
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					fallthrough;
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);
			spin_unlock_bh(&nr_node_list_lock);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 *	Lock a neighbour with the given quality, creating it if it does not
 *	exist yet.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 *	"Delete" a neighbour. The neighbour is only removed if the number
 *	of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL) return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 *	Decrement the obsolescence count by one. If a route is reduced to a
 *	count of zero, remove it. Also remove any unlocked neighbours with
 *	zero nodes routing via them.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
					break;
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;

			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 *	A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
							break;
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 *	Check that the device given is a valid AX.25 interface that is "up",
 *	or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 *	Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 *	Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

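/*
 *	Build an ax25_digi from the digipeater list supplied in an ioctl
 *	request; returns NULL when there are no digipeaters.
 */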
static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i]    = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi      = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

/*
 *	Handle the ioctls that control the routing functions.
 */
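/*
 *	A rough userspace sketch (not part of this file) of how a locked
 *	neighbour might be added; "fd" is assumed to be an AF_NETROM socket,
 *	"neigh_call" a pre-built ax25_address, and error handling is omitted:
 *
 *		struct nr_route_struct nr_route = {0};
 *
 *		nr_route.type     = NETROM_NEIGH;
 *		nr_route.callsign = neigh_call;
 *		strcpy(nr_route.device, "ax0");
 *		nr_route.quality  = 192;
 *		nr_route.ndigis   = 0;
 *		ioctl(fd, SIOCADDRT, &nr_route);
 */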
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 *	A level 2 link has timed out, so it appears to be a poor link; don't
 *	use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 *	Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 *	indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

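	/*
	 * The NET/ROM network header begins with the source callsign
	 * (7 bytes) followed by the destination callsign (7 bytes); the
	 * time-to-live byte sits at offset 14 and is checked and
	 * decremented below.
	 */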
	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the NET/ROM headers, so we need our own
	   skb; we also did not know until now how much header space we
	   had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;	/* decrement the time-to-live */

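	/* Prepend the AX.25 PID byte identifying the payload as NET/ROM. */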
	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (const ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

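/*
 *	seq_file iterators for the node table, walked under
 *	nr_node_list_lock. These back the /proc/net/nr_nodes listing (the
 *	proc entry itself is registered by the AF_NETROM socket code, not
 *	here).
 */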
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s  %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, "  %3d   %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

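/*
 *	The same pattern for the neighbour table, walked under
 *	nr_neigh_list_lock and exposed as /proc/net/nr_neigh.
 */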
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_neigh_list_lock)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
	__releases(&nr_neigh_list_lock)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr  callsign  dev  qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s  %3d    %d   %3d    %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};
#endif

/*
 *	Free all memory associated with the nodes and routes lists.
 */
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}
979