1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License as published by 4 * the Free Software Foundation; either version 2 of the License, or 5 * (at your option) any later version. 6 * 7 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) 8 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) 9 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) 10 */ 11 #include <linux/errno.h> 12 #include <linux/types.h> 13 #include <linux/socket.h> 14 #include <linux/in.h> 15 #include <linux/kernel.h> 16 #include <linux/timer.h> 17 #include <linux/string.h> 18 #include <linux/sockios.h> 19 #include <linux/net.h> 20 #include <linux/slab.h> 21 #include <net/ax25.h> 22 #include <linux/inet.h> 23 #include <linux/netdevice.h> 24 #include <net/arp.h> 25 #include <linux/if_arp.h> 26 #include <linux/skbuff.h> 27 #include <net/sock.h> 28 #include <asm/uaccess.h> 29 #include <asm/system.h> 30 #include <linux/fcntl.h> 31 #include <linux/termios.h> /* For TIOCINQ/OUTQ */ 32 #include <linux/mm.h> 33 #include <linux/interrupt.h> 34 #include <linux/notifier.h> 35 #include <linux/netfilter.h> 36 #include <linux/init.h> 37 #include <linux/spinlock.h> 38 #include <net/netrom.h> 39 #include <linux/seq_file.h> 40 41 static unsigned int nr_neigh_no = 1; 42 43 static HLIST_HEAD(nr_node_list); 44 static DEFINE_SPINLOCK(nr_node_list_lock); 45 static HLIST_HEAD(nr_neigh_list); 46 static DEFINE_SPINLOCK(nr_neigh_list_lock); 47 48 static struct nr_node *nr_node_get(ax25_address *callsign) 49 { 50 struct nr_node *found = NULL; 51 struct nr_node *nr_node; 52 struct hlist_node *node; 53 54 spin_lock_bh(&nr_node_list_lock); 55 nr_node_for_each(nr_node, node, &nr_node_list) 56 if (ax25cmp(callsign, &nr_node->callsign) == 0) { 57 nr_node_hold(nr_node); 58 found = nr_node; 59 break; 60 } 61 spin_unlock_bh(&nr_node_list_lock); 62 return found; 63 } 64 65 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign, 66 struct net_device *dev) 67 { 68 struct nr_neigh *found = NULL; 69 struct nr_neigh *nr_neigh; 70 struct hlist_node *node; 71 72 spin_lock_bh(&nr_neigh_list_lock); 73 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) 74 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && 75 nr_neigh->dev == dev) { 76 nr_neigh_hold(nr_neigh); 77 found = nr_neigh; 78 break; 79 } 80 spin_unlock_bh(&nr_neigh_list_lock); 81 return found; 82 } 83 84 static void nr_remove_neigh(struct nr_neigh *); 85 86 /* 87 * Add a new route to a node, and in the process add the node and the 88 * neighbour if it is new. 89 */ 90 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, 91 ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev, 92 int quality, int obs_count) 93 { 94 struct nr_node *nr_node; 95 struct nr_neigh *nr_neigh; 96 struct nr_route nr_route; 97 int i, found; 98 struct net_device *odev; 99 100 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */ 101 dev_put(odev); 102 return -EINVAL; 103 } 104 105 nr_node = nr_node_get(nr); 106 107 nr_neigh = nr_neigh_get_dev(ax25, dev); 108 109 /* 110 * The L2 link to a neighbour has failed in the past 111 * and now a frame comes from this neighbour. We assume 112 * it was a temporary trouble with the link and reset the 113 * routes now (and not wait for a node broadcast). 114 */ 115 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { 116 struct nr_node *nr_nodet; 117 struct hlist_node *node; 118 119 spin_lock_bh(&nr_node_list_lock); 120 nr_node_for_each(nr_nodet, node, &nr_node_list) { 121 nr_node_lock(nr_nodet); 122 for (i = 0; i < nr_nodet->count; i++) 123 if (nr_nodet->routes[i].neighbour == nr_neigh) 124 if (i < nr_nodet->which) 125 nr_nodet->which = i; 126 nr_node_unlock(nr_nodet); 127 } 128 spin_unlock_bh(&nr_node_list_lock); 129 } 130 131 if (nr_neigh != NULL) 132 nr_neigh->failed = 0; 133 134 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) { 135 nr_neigh_put(nr_neigh); 136 nr_node_put(nr_node); 137 return 0; 138 } 139 140 if (nr_neigh == NULL) { 141 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) { 142 if (nr_node) 143 nr_node_put(nr_node); 144 return -ENOMEM; 145 } 146 147 nr_neigh->callsign = *ax25; 148 nr_neigh->digipeat = NULL; 149 nr_neigh->ax25 = NULL; 150 nr_neigh->dev = dev; 151 nr_neigh->quality = sysctl_netrom_default_path_quality; 152 nr_neigh->locked = 0; 153 nr_neigh->count = 0; 154 nr_neigh->number = nr_neigh_no++; 155 nr_neigh->failed = 0; 156 atomic_set(&nr_neigh->refcount, 1); 157 158 if (ax25_digi != NULL && ax25_digi->ndigi > 0) { 159 nr_neigh->digipeat = kmemdup(ax25_digi, 160 sizeof(*ax25_digi), 161 GFP_KERNEL); 162 if (nr_neigh->digipeat == NULL) { 163 kfree(nr_neigh); 164 if (nr_node) 165 nr_node_put(nr_node); 166 return -ENOMEM; 167 } 168 } 169 170 spin_lock_bh(&nr_neigh_list_lock); 171 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); 172 nr_neigh_hold(nr_neigh); 173 spin_unlock_bh(&nr_neigh_list_lock); 174 } 175 176 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked) 177 nr_neigh->quality = quality; 178 179 if (nr_node == NULL) { 180 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) { 181 if (nr_neigh) 182 nr_neigh_put(nr_neigh); 183 return -ENOMEM; 184 } 185 186 nr_node->callsign = *nr; 187 strcpy(nr_node->mnemonic, mnemonic); 188 189 nr_node->which = 0; 190 nr_node->count = 1; 191 atomic_set(&nr_node->refcount, 1); 192 spin_lock_init(&nr_node->node_lock); 193 194 nr_node->routes[0].quality = quality; 195 nr_node->routes[0].obs_count = obs_count; 196 nr_node->routes[0].neighbour = nr_neigh; 197 198 nr_neigh_hold(nr_neigh); 199 nr_neigh->count++; 200 201 spin_lock_bh(&nr_node_list_lock); 202 hlist_add_head(&nr_node->node_node, &nr_node_list); 203 /* refcount initialized at 1 */ 204 spin_unlock_bh(&nr_node_list_lock); 205 206 return 0; 207 } 208 nr_node_lock(nr_node); 209 210 if (quality != 0) 211 strcpy(nr_node->mnemonic, mnemonic); 212 213 for (found = 0, i = 0; i < nr_node->count; i++) { 214 if (nr_node->routes[i].neighbour == nr_neigh) { 215 nr_node->routes[i].quality = quality; 216 nr_node->routes[i].obs_count = obs_count; 217 found = 1; 218 break; 219 } 220 } 221 222 if (!found) { 223 /* We have space at the bottom, slot it in */ 224 if (nr_node->count < 3) { 225 nr_node->routes[2] = nr_node->routes[1]; 226 nr_node->routes[1] = nr_node->routes[0]; 227 228 nr_node->routes[0].quality = quality; 229 nr_node->routes[0].obs_count = obs_count; 230 nr_node->routes[0].neighbour = nr_neigh; 231 232 nr_node->which++; 233 nr_node->count++; 234 nr_neigh_hold(nr_neigh); 235 nr_neigh->count++; 236 } else { 237 /* It must be better than the worst */ 238 if (quality > nr_node->routes[2].quality) { 239 nr_node->routes[2].neighbour->count--; 240 nr_neigh_put(nr_node->routes[2].neighbour); 241 242 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked) 243 nr_remove_neigh(nr_node->routes[2].neighbour); 244 245 nr_node->routes[2].quality = quality; 246 nr_node->routes[2].obs_count = obs_count; 247 nr_node->routes[2].neighbour = nr_neigh; 248 249 nr_neigh_hold(nr_neigh); 250 nr_neigh->count++; 251 } 252 } 253 } 254 255 /* Now re-sort the routes in quality order */ 256 switch (nr_node->count) { 257 case 3: 258 if (nr_node->routes[1].quality > nr_node->routes[0].quality) { 259 switch (nr_node->which) { 260 case 0: 261 nr_node->which = 1; 262 break; 263 case 1: 264 nr_node->which = 0; 265 break; 266 } 267 nr_route = nr_node->routes[0]; 268 nr_node->routes[0] = nr_node->routes[1]; 269 nr_node->routes[1] = nr_route; 270 } 271 if (nr_node->routes[2].quality > nr_node->routes[1].quality) { 272 switch (nr_node->which) { 273 case 1: nr_node->which = 2; 274 break; 275 276 case 2: nr_node->which = 1; 277 break; 278 279 default: 280 break; 281 } 282 nr_route = nr_node->routes[1]; 283 nr_node->routes[1] = nr_node->routes[2]; 284 nr_node->routes[2] = nr_route; 285 } 286 case 2: 287 if (nr_node->routes[1].quality > nr_node->routes[0].quality) { 288 switch (nr_node->which) { 289 case 0: nr_node->which = 1; 290 break; 291 292 case 1: nr_node->which = 0; 293 break; 294 295 default: break; 296 } 297 nr_route = nr_node->routes[0]; 298 nr_node->routes[0] = nr_node->routes[1]; 299 nr_node->routes[1] = nr_route; 300 } 301 case 1: 302 break; 303 } 304 305 for (i = 0; i < nr_node->count; i++) { 306 if (nr_node->routes[i].neighbour == nr_neigh) { 307 if (i < nr_node->which) 308 nr_node->which = i; 309 break; 310 } 311 } 312 313 nr_neigh_put(nr_neigh); 314 nr_node_unlock(nr_node); 315 nr_node_put(nr_node); 316 return 0; 317 } 318 319 static inline void __nr_remove_node(struct nr_node *nr_node) 320 { 321 hlist_del_init(&nr_node->node_node); 322 nr_node_put(nr_node); 323 } 324 325 #define nr_remove_node_locked(__node) \ 326 __nr_remove_node(__node) 327 328 static void nr_remove_node(struct nr_node *nr_node) 329 { 330 spin_lock_bh(&nr_node_list_lock); 331 __nr_remove_node(nr_node); 332 spin_unlock_bh(&nr_node_list_lock); 333 } 334 335 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh) 336 { 337 hlist_del_init(&nr_neigh->neigh_node); 338 nr_neigh_put(nr_neigh); 339 } 340 341 #define nr_remove_neigh_locked(__neigh) \ 342 __nr_remove_neigh(__neigh) 343 344 static void nr_remove_neigh(struct nr_neigh *nr_neigh) 345 { 346 spin_lock_bh(&nr_neigh_list_lock); 347 __nr_remove_neigh(nr_neigh); 348 spin_unlock_bh(&nr_neigh_list_lock); 349 } 350 351 /* 352 * "Delete" a node. Strictly speaking remove a route to a node. The node 353 * is only deleted if no routes are left to it. 354 */ 355 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev) 356 { 357 struct nr_node *nr_node; 358 struct nr_neigh *nr_neigh; 359 int i; 360 361 nr_node = nr_node_get(callsign); 362 363 if (nr_node == NULL) 364 return -EINVAL; 365 366 nr_neigh = nr_neigh_get_dev(neighbour, dev); 367 368 if (nr_neigh == NULL) { 369 nr_node_put(nr_node); 370 return -EINVAL; 371 } 372 373 nr_node_lock(nr_node); 374 for (i = 0; i < nr_node->count; i++) { 375 if (nr_node->routes[i].neighbour == nr_neigh) { 376 nr_neigh->count--; 377 nr_neigh_put(nr_neigh); 378 379 if (nr_neigh->count == 0 && !nr_neigh->locked) 380 nr_remove_neigh(nr_neigh); 381 nr_neigh_put(nr_neigh); 382 383 nr_node->count--; 384 385 if (nr_node->count == 0) { 386 nr_remove_node(nr_node); 387 } else { 388 switch (i) { 389 case 0: 390 nr_node->routes[0] = nr_node->routes[1]; 391 case 1: 392 nr_node->routes[1] = nr_node->routes[2]; 393 case 2: 394 break; 395 } 396 nr_node_put(nr_node); 397 } 398 nr_node_unlock(nr_node); 399 400 return 0; 401 } 402 } 403 nr_neigh_put(nr_neigh); 404 nr_node_unlock(nr_node); 405 nr_node_put(nr_node); 406 407 return -EINVAL; 408 } 409 410 /* 411 * Lock a neighbour with a quality. 412 */ 413 static int __must_check nr_add_neigh(ax25_address *callsign, 414 ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality) 415 { 416 struct nr_neigh *nr_neigh; 417 418 nr_neigh = nr_neigh_get_dev(callsign, dev); 419 if (nr_neigh) { 420 nr_neigh->quality = quality; 421 nr_neigh->locked = 1; 422 nr_neigh_put(nr_neigh); 423 return 0; 424 } 425 426 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) 427 return -ENOMEM; 428 429 nr_neigh->callsign = *callsign; 430 nr_neigh->digipeat = NULL; 431 nr_neigh->ax25 = NULL; 432 nr_neigh->dev = dev; 433 nr_neigh->quality = quality; 434 nr_neigh->locked = 1; 435 nr_neigh->count = 0; 436 nr_neigh->number = nr_neigh_no++; 437 nr_neigh->failed = 0; 438 atomic_set(&nr_neigh->refcount, 1); 439 440 if (ax25_digi != NULL && ax25_digi->ndigi > 0) { 441 nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), 442 GFP_KERNEL); 443 if (nr_neigh->digipeat == NULL) { 444 kfree(nr_neigh); 445 return -ENOMEM; 446 } 447 } 448 449 spin_lock_bh(&nr_neigh_list_lock); 450 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); 451 /* refcount is initialized at 1 */ 452 spin_unlock_bh(&nr_neigh_list_lock); 453 454 return 0; 455 } 456 457 /* 458 * "Delete" a neighbour. The neighbour is only removed if the number 459 * of nodes that may use it is zero. 460 */ 461 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality) 462 { 463 struct nr_neigh *nr_neigh; 464 465 nr_neigh = nr_neigh_get_dev(callsign, dev); 466 467 if (nr_neigh == NULL) return -EINVAL; 468 469 nr_neigh->quality = quality; 470 nr_neigh->locked = 0; 471 472 if (nr_neigh->count == 0) 473 nr_remove_neigh(nr_neigh); 474 nr_neigh_put(nr_neigh); 475 476 return 0; 477 } 478 479 /* 480 * Decrement the obsolescence count by one. If a route is reduced to a 481 * count of zero, remove it. Also remove any unlocked neighbours with 482 * zero nodes routing via it. 483 */ 484 static int nr_dec_obs(void) 485 { 486 struct nr_neigh *nr_neigh; 487 struct nr_node *s; 488 struct hlist_node *node, *nodet; 489 int i; 490 491 spin_lock_bh(&nr_node_list_lock); 492 nr_node_for_each_safe(s, node, nodet, &nr_node_list) { 493 nr_node_lock(s); 494 for (i = 0; i < s->count; i++) { 495 switch (s->routes[i].obs_count) { 496 case 0: /* A locked entry */ 497 break; 498 499 case 1: /* From 1 -> 0 */ 500 nr_neigh = s->routes[i].neighbour; 501 502 nr_neigh->count--; 503 nr_neigh_put(nr_neigh); 504 505 if (nr_neigh->count == 0 && !nr_neigh->locked) 506 nr_remove_neigh(nr_neigh); 507 508 s->count--; 509 510 switch (i) { 511 case 0: 512 s->routes[0] = s->routes[1]; 513 /* Fallthrough */ 514 case 1: 515 s->routes[1] = s->routes[2]; 516 case 2: 517 break; 518 } 519 break; 520 521 default: 522 s->routes[i].obs_count--; 523 break; 524 525 } 526 } 527 528 if (s->count <= 0) 529 nr_remove_node_locked(s); 530 nr_node_unlock(s); 531 } 532 spin_unlock_bh(&nr_node_list_lock); 533 534 return 0; 535 } 536 537 /* 538 * A device has been removed. Remove its routes and neighbours. 539 */ 540 void nr_rt_device_down(struct net_device *dev) 541 { 542 struct nr_neigh *s; 543 struct hlist_node *node, *nodet, *node2, *node2t; 544 struct nr_node *t; 545 int i; 546 547 spin_lock_bh(&nr_neigh_list_lock); 548 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 549 if (s->dev == dev) { 550 spin_lock_bh(&nr_node_list_lock); 551 nr_node_for_each_safe(t, node2, node2t, &nr_node_list) { 552 nr_node_lock(t); 553 for (i = 0; i < t->count; i++) { 554 if (t->routes[i].neighbour == s) { 555 t->count--; 556 557 switch (i) { 558 case 0: 559 t->routes[0] = t->routes[1]; 560 case 1: 561 t->routes[1] = t->routes[2]; 562 case 2: 563 break; 564 } 565 } 566 } 567 568 if (t->count <= 0) 569 nr_remove_node_locked(t); 570 nr_node_unlock(t); 571 } 572 spin_unlock_bh(&nr_node_list_lock); 573 574 nr_remove_neigh_locked(s); 575 } 576 } 577 spin_unlock_bh(&nr_neigh_list_lock); 578 } 579 580 /* 581 * Check that the device given is a valid AX.25 interface that is "up". 582 * Or a valid ethernet interface with an AX.25 callsign binding. 583 */ 584 static struct net_device *nr_ax25_dev_get(char *devname) 585 { 586 struct net_device *dev; 587 588 if ((dev = dev_get_by_name(&init_net, devname)) == NULL) 589 return NULL; 590 591 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) 592 return dev; 593 594 dev_put(dev); 595 return NULL; 596 } 597 598 /* 599 * Find the first active NET/ROM device, usually "nr0". 600 */ 601 struct net_device *nr_dev_first(void) 602 { 603 struct net_device *dev, *first = NULL; 604 605 rcu_read_lock(); 606 for_each_netdev_rcu(&init_net, dev) { 607 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) 608 if (first == NULL || strncmp(dev->name, first->name, 3) < 0) 609 first = dev; 610 } 611 if (first) 612 dev_hold(first); 613 rcu_read_unlock(); 614 615 return first; 616 } 617 618 /* 619 * Find the NET/ROM device for the given callsign. 620 */ 621 struct net_device *nr_dev_get(ax25_address *addr) 622 { 623 struct net_device *dev; 624 625 rcu_read_lock(); 626 for_each_netdev_rcu(&init_net, dev) { 627 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && 628 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { 629 dev_hold(dev); 630 goto out; 631 } 632 } 633 dev = NULL; 634 out: 635 rcu_read_unlock(); 636 return dev; 637 } 638 639 static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis, 640 ax25_address *digipeaters) 641 { 642 int i; 643 644 if (ndigis == 0) 645 return NULL; 646 647 for (i = 0; i < ndigis; i++) { 648 digi->calls[i] = digipeaters[i]; 649 digi->repeated[i] = 0; 650 } 651 652 digi->ndigi = ndigis; 653 digi->lastrepeat = -1; 654 655 return digi; 656 } 657 658 /* 659 * Handle the ioctls that control the routing functions. 660 */ 661 int nr_rt_ioctl(unsigned int cmd, void __user *arg) 662 { 663 struct nr_route_struct nr_route; 664 struct net_device *dev; 665 ax25_digi digi; 666 int ret; 667 668 switch (cmd) { 669 case SIOCADDRT: 670 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) 671 return -EFAULT; 672 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) 673 return -EINVAL; 674 if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) { 675 dev_put(dev); 676 return -EINVAL; 677 } 678 switch (nr_route.type) { 679 case NETROM_NODE: 680 ret = nr_add_node(&nr_route.callsign, 681 nr_route.mnemonic, 682 &nr_route.neighbour, 683 nr_call_to_digi(&digi, nr_route.ndigis, 684 nr_route.digipeaters), 685 dev, nr_route.quality, 686 nr_route.obs_count); 687 break; 688 case NETROM_NEIGH: 689 ret = nr_add_neigh(&nr_route.callsign, 690 nr_call_to_digi(&digi, nr_route.ndigis, 691 nr_route.digipeaters), 692 dev, nr_route.quality); 693 break; 694 default: 695 ret = -EINVAL; 696 } 697 dev_put(dev); 698 return ret; 699 700 case SIOCDELRT: 701 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) 702 return -EFAULT; 703 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) 704 return -EINVAL; 705 switch (nr_route.type) { 706 case NETROM_NODE: 707 ret = nr_del_node(&nr_route.callsign, 708 &nr_route.neighbour, dev); 709 break; 710 case NETROM_NEIGH: 711 ret = nr_del_neigh(&nr_route.callsign, 712 dev, nr_route.quality); 713 break; 714 default: 715 ret = -EINVAL; 716 } 717 dev_put(dev); 718 return ret; 719 720 case SIOCNRDECOBS: 721 return nr_dec_obs(); 722 723 default: 724 return -EINVAL; 725 } 726 727 return 0; 728 } 729 730 /* 731 * A level 2 link has timed out, therefore it appears to be a poor link, 732 * then don't use that neighbour until it is reset. 733 */ 734 void nr_link_failed(ax25_cb *ax25, int reason) 735 { 736 struct nr_neigh *s, *nr_neigh = NULL; 737 struct hlist_node *node; 738 struct nr_node *nr_node = NULL; 739 740 spin_lock_bh(&nr_neigh_list_lock); 741 nr_neigh_for_each(s, node, &nr_neigh_list) { 742 if (s->ax25 == ax25) { 743 nr_neigh_hold(s); 744 nr_neigh = s; 745 break; 746 } 747 } 748 spin_unlock_bh(&nr_neigh_list_lock); 749 750 if (nr_neigh == NULL) 751 return; 752 753 nr_neigh->ax25 = NULL; 754 ax25_cb_put(ax25); 755 756 if (++nr_neigh->failed < sysctl_netrom_link_fails_count) { 757 nr_neigh_put(nr_neigh); 758 return; 759 } 760 spin_lock_bh(&nr_node_list_lock); 761 nr_node_for_each(nr_node, node, &nr_node_list) { 762 nr_node_lock(nr_node); 763 if (nr_node->which < nr_node->count && 764 nr_node->routes[nr_node->which].neighbour == nr_neigh) 765 nr_node->which++; 766 nr_node_unlock(nr_node); 767 } 768 spin_unlock_bh(&nr_node_list_lock); 769 nr_neigh_put(nr_neigh); 770 } 771 772 /* 773 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb 774 * indicates an internally generated frame. 775 */ 776 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) 777 { 778 ax25_address *nr_src, *nr_dest; 779 struct nr_neigh *nr_neigh; 780 struct nr_node *nr_node; 781 struct net_device *dev; 782 unsigned char *dptr; 783 ax25_cb *ax25s; 784 int ret; 785 struct sk_buff *skbn; 786 787 788 nr_src = (ax25_address *)(skb->data + 0); 789 nr_dest = (ax25_address *)(skb->data + 7); 790 791 if (ax25 != NULL) { 792 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat, 793 ax25->ax25_dev->dev, 0, 794 sysctl_netrom_obsolescence_count_initialiser); 795 if (ret) 796 return ret; 797 } 798 799 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */ 800 if (ax25 == NULL) /* Its from me */ 801 ret = nr_loopback_queue(skb); 802 else 803 ret = nr_rx_frame(skb, dev); 804 dev_put(dev); 805 return ret; 806 } 807 808 if (!sysctl_netrom_routing_control && ax25 != NULL) 809 return 0; 810 811 /* Its Time-To-Live has expired */ 812 if (skb->data[14] == 1) { 813 return 0; 814 } 815 816 nr_node = nr_node_get(nr_dest); 817 if (nr_node == NULL) 818 return 0; 819 nr_node_lock(nr_node); 820 821 if (nr_node->which >= nr_node->count) { 822 nr_node_unlock(nr_node); 823 nr_node_put(nr_node); 824 return 0; 825 } 826 827 nr_neigh = nr_node->routes[nr_node->which].neighbour; 828 829 if ((dev = nr_dev_first()) == NULL) { 830 nr_node_unlock(nr_node); 831 nr_node_put(nr_node); 832 return 0; 833 } 834 835 /* We are going to change the netrom headers so we should get our 836 own skb, we also did not know until now how much header space 837 we had to reserve... - RXQ */ 838 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) { 839 nr_node_unlock(nr_node); 840 nr_node_put(nr_node); 841 dev_put(dev); 842 return 0; 843 } 844 kfree_skb(skb); 845 skb=skbn; 846 skb->data[14]--; 847 848 dptr = skb_push(skb, 1); 849 *dptr = AX25_P_NETROM; 850 851 ax25s = nr_neigh->ax25; 852 nr_neigh->ax25 = ax25_send_frame(skb, 256, 853 (ax25_address *)dev->dev_addr, 854 &nr_neigh->callsign, 855 nr_neigh->digipeat, nr_neigh->dev); 856 if (ax25s) 857 ax25_cb_put(ax25s); 858 859 dev_put(dev); 860 ret = (nr_neigh->ax25 != NULL); 861 nr_node_unlock(nr_node); 862 nr_node_put(nr_node); 863 864 return ret; 865 } 866 867 #ifdef CONFIG_PROC_FS 868 869 static void *nr_node_start(struct seq_file *seq, loff_t *pos) 870 { 871 spin_lock_bh(&nr_node_list_lock); 872 return seq_hlist_start_head(&nr_node_list, *pos); 873 } 874 875 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos) 876 { 877 return seq_hlist_next(v, &nr_node_list, pos); 878 } 879 880 static void nr_node_stop(struct seq_file *seq, void *v) 881 { 882 spin_unlock_bh(&nr_node_list_lock); 883 } 884 885 static int nr_node_show(struct seq_file *seq, void *v) 886 { 887 char buf[11]; 888 int i; 889 890 if (v == SEQ_START_TOKEN) 891 seq_puts(seq, 892 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n"); 893 else { 894 struct nr_node *nr_node = hlist_entry(v, struct nr_node, 895 node_node); 896 897 nr_node_lock(nr_node); 898 seq_printf(seq, "%-9s %-7s %d %d", 899 ax2asc(buf, &nr_node->callsign), 900 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic, 901 nr_node->which + 1, 902 nr_node->count); 903 904 for (i = 0; i < nr_node->count; i++) { 905 seq_printf(seq, " %3d %d %05d", 906 nr_node->routes[i].quality, 907 nr_node->routes[i].obs_count, 908 nr_node->routes[i].neighbour->number); 909 } 910 nr_node_unlock(nr_node); 911 912 seq_puts(seq, "\n"); 913 } 914 return 0; 915 } 916 917 static const struct seq_operations nr_node_seqops = { 918 .start = nr_node_start, 919 .next = nr_node_next, 920 .stop = nr_node_stop, 921 .show = nr_node_show, 922 }; 923 924 static int nr_node_info_open(struct inode *inode, struct file *file) 925 { 926 return seq_open(file, &nr_node_seqops); 927 } 928 929 const struct file_operations nr_nodes_fops = { 930 .owner = THIS_MODULE, 931 .open = nr_node_info_open, 932 .read = seq_read, 933 .llseek = seq_lseek, 934 .release = seq_release, 935 }; 936 937 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) 938 { 939 spin_lock_bh(&nr_neigh_list_lock); 940 return seq_hlist_start_head(&nr_neigh_list, *pos); 941 } 942 943 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 944 { 945 return seq_hlist_next(v, &nr_neigh_list, pos); 946 } 947 948 static void nr_neigh_stop(struct seq_file *seq, void *v) 949 { 950 spin_unlock_bh(&nr_neigh_list_lock); 951 } 952 953 static int nr_neigh_show(struct seq_file *seq, void *v) 954 { 955 char buf[11]; 956 int i; 957 958 if (v == SEQ_START_TOKEN) 959 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); 960 else { 961 struct nr_neigh *nr_neigh; 962 963 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node); 964 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 965 nr_neigh->number, 966 ax2asc(buf, &nr_neigh->callsign), 967 nr_neigh->dev ? nr_neigh->dev->name : "???", 968 nr_neigh->quality, 969 nr_neigh->locked, 970 nr_neigh->count, 971 nr_neigh->failed); 972 973 if (nr_neigh->digipeat != NULL) { 974 for (i = 0; i < nr_neigh->digipeat->ndigi; i++) 975 seq_printf(seq, " %s", 976 ax2asc(buf, &nr_neigh->digipeat->calls[i])); 977 } 978 979 seq_puts(seq, "\n"); 980 } 981 return 0; 982 } 983 984 static const struct seq_operations nr_neigh_seqops = { 985 .start = nr_neigh_start, 986 .next = nr_neigh_next, 987 .stop = nr_neigh_stop, 988 .show = nr_neigh_show, 989 }; 990 991 static int nr_neigh_info_open(struct inode *inode, struct file *file) 992 { 993 return seq_open(file, &nr_neigh_seqops); 994 } 995 996 const struct file_operations nr_neigh_fops = { 997 .owner = THIS_MODULE, 998 .open = nr_neigh_info_open, 999 .read = seq_read, 1000 .llseek = seq_lseek, 1001 .release = seq_release, 1002 }; 1003 1004 #endif 1005 1006 /* 1007 * Free all memory associated with the nodes and routes lists. 1008 */ 1009 void __exit nr_rt_free(void) 1010 { 1011 struct nr_neigh *s = NULL; 1012 struct nr_node *t = NULL; 1013 struct hlist_node *node, *nodet; 1014 1015 spin_lock_bh(&nr_neigh_list_lock); 1016 spin_lock_bh(&nr_node_list_lock); 1017 nr_node_for_each_safe(t, node, nodet, &nr_node_list) { 1018 nr_node_lock(t); 1019 nr_remove_node_locked(t); 1020 nr_node_unlock(t); 1021 } 1022 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) { 1023 while(s->count) { 1024 s->count--; 1025 nr_neigh_put(s); 1026 } 1027 nr_remove_neigh_locked(s); 1028 } 1029 spin_unlock_bh(&nr_node_list_lock); 1030 spin_unlock_bh(&nr_neigh_list_lock); 1031 } 1032