/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>

static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;
	struct hlist_node *node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);
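
/*
 * Routing table overview: each nr_node holds up to three candidate
 * routes in routes[], kept in descending quality order, and
 * routes[nr_node->which] is the route currently in use; nr_link_failed()
 * advances `which' when a neighbour's L2 link keeps failing.  Nodes and
 * neighbours are reference counted: the list itself holds one reference
 * on each entry and every route entry holds one on its neighbour.  The
 * two hlists are protected by nr_node_list_lock and nr_neigh_list_lock,
 * and each node's route array is guarded by its own node_lock.
 */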

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if they are new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	struct nr_route nr_route;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;
		struct hlist_node *node;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, node, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = sysctl_netrom_default_path_quality;
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		atomic_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_KERNEL);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		atomic_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 &&
				    !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}
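
	/*
	 * The re-sort below is a single bubble pass over at most three
	 * entries: case 3 compares routes 0/1 and 1/2 and then falls
	 * through to case 2, which repeats the 0/1 comparison, leaving the
	 * table in descending quality order.  Whenever two entries swap,
	 * nr_node->which is adjusted as well so that it keeps pointing at
	 * the same route; the loop after the switch then pulls `which'
	 * back if the neighbour we just updated now sits above the route
	 * currently in use.
	 */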

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0:
				nr_node->which = 1;
				break;
			case 1:
				nr_node->which = 0;
				break;
			default:
				break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
			switch (nr_node->which) {
			case 1:
				nr_node->which = 2;
				break;
			case 2:
				nr_node->which = 1;
				break;
			default:
				break;
			}
			nr_route           = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[2];
			nr_node->routes[2] = nr_route;
		}
	case 2:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0:
				nr_node->which = 1;
				break;
			case 1:
				nr_node->which = 0;
				break;
			default:
				break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}
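
/*
 * The __nr_remove_node()/__nr_remove_neigh() helpers above expect the
 * corresponding list lock to already be held; the *_locked() macros
 * exist only to make that requirement visible at the call sites, while
 * nr_remove_node() and nr_remove_neigh() take the lock themselves.
 */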

/*
 * "Delete" a node. Strictly speaking remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	atomic_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL)
		return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}
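
/*
 * A route whose obs_count is 0 is treated as locked and never ages out;
 * routes learned from incoming traffic start at
 * sysctl_netrom_obsolescence_count_initialiser (see nr_route_frame()).
 * nr_dec_obs() below is driven by the SIOCNRDECOBS ioctl in
 * nr_rt_ioctl(); user space (typically the NET/ROM routing daemon) is
 * assumed to issue it periodically so that stale routes expire.
 */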

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via them.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *node, *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
				case 1:
					s->routes[1] = s->routes[2];
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *node, *nodet, *node2, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
						case 1:
							t->routes[1] = t->routes[2];
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the device given is a valid AX.25 interface that is "up",
 * or a valid Ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	read_unlock(&dev_base_lock);

	return first;
}
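
/*
 * Both nr_dev_first() above and nr_dev_get() below return the device
 * with a reference held (dev_hold()); callers are expected to drop it
 * again with dev_put() when they are finished with it.
 */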

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	read_unlock(&dev_base_lock);
	return dev;
}

static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
{
	static ax25_digi ax25_digi;
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		ax25_digi.calls[i]    = digipeaters[i];
		ax25_digi.repeated[i] = 0;
	}

	ax25_digi.ndigi      = ndigis;
	ax25_digi.lastrepeat = -1;

	return &ax25_digi;
}

/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
			dev_put(dev);
			return -EINVAL;
		}
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
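
/*
 * For reference, a rough user-space sketch of how the SIOCADDRT path
 * above is normally driven (the ioctl is issued on a NET/ROM socket;
 * the device name and quality below are only example values):
 *
 *	struct nr_route_struct nr_route;
 *	int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);
 *
 *	memset(&nr_route, 0, sizeof(nr_route));
 *	nr_route.type     = NETROM_NEIGH;
 *	nr_route.callsign = <neighbour's AX.25 callsign>;
 *	strcpy(nr_route.device, "ax0");
 *	nr_route.quality  = 192;
 *	nr_route.ndigis   = 0;
 *	ioctl(s, SIOCADDRT, &nr_route);
 */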

/*
 * A level 2 link has timed out, so it appears to be a poor link; don't
 * use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct hlist_node *node;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, node, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);
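
	/*
	 * NET/ROM network header layout as used below: bytes 0-6 carry the
	 * origin callsign, bytes 7-13 the destination callsign, and byte 14
	 * the time-to-live, which is decremented on every hop; a frame whose
	 * TTL has dropped to one is no longer forwarded.
	 */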

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* Its for me */
		if (ax25 == NULL)			/* Its from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = ax25_send_frame(skb, 256,
				(ax25_address *)dev->dev_addr,
				&nr_neigh->callsign,
				nr_neigh->digipeat, nr_neigh->dev);
	if (nr_neigh->ax25 && ax25s) {
		/* We were already holding this ax25_cb */
		ax25_cb_put(ax25s);
	}
	nr_neigh->ax25 = ax25s;

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_node *nr_node;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_node_for_each(nr_node, node, &nr_node_list) {
		if (i == *pos)
			return nr_node;
		++i;
	}

	return NULL;
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_node_list.first
		: ((struct nr_node *)v)->node_node.next;

	return hlist_entry(node, struct nr_node, node_node);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = v;
		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

const struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_neigh_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
		if (i == *pos)
			return nr_neigh;
		++i;
	}
	return NULL;
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_neigh_list.first
		: ((struct nr_neigh *)v)->neigh_node.next;

	return hlist_entry(node, struct nr_neigh, neigh_node);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh = v;

		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

const struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with the node and neighbour lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *node, *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}