/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>

static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;
	struct hlist_node *node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25,
	ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	struct nr_route nr_route;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);
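
	/*
	 * Both lookups above take a reference on success; every exit path
	 * below has to balance them with nr_node_put()/nr_neigh_put().
	 */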

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;
		struct hlist_node *node;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, node, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25 = NULL;
		nr_neigh->dev = dev;
		nr_neigh->quality = sysctl_netrom_default_path_quality;
		nr_neigh->locked = 0;
		nr_neigh->count = 0;
		nr_neigh->number = nr_neigh_no++;
		nr_neigh->failed = 0;
		atomic_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_ATOMIC)) == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
			memcpy(nr_neigh->digipeat, ax25_digi,
			       sizeof(*ax25_digi));
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		atomic_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 &&
				    !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}
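
	/*
	 * Each node keeps at most three routes, held best quality first;
	 * nr_node->which is the index of the route currently in use and
	 * has to follow its entry when entries are swapped below.  Note
	 * that case 3 deliberately falls through into case 2.
	 */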

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0:
				nr_node->which = 1;
				break;
			case 1:
				nr_node->which = 0;
				break;
			default:
				break;
			}
			nr_route = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
			switch (nr_node->which) {
			case 1:
				nr_node->which = 2;
				break;
			case 2:
				nr_node->which = 1;
				break;
			default:
				break;
			}
			nr_route = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[2];
			nr_node->routes[2] = nr_route;
		}
	case 2:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0:
				nr_node->which = 1;
				break;
			case 1:
				nr_node->which = 0;
				break;
			default:
				break;
			}
			nr_route = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}
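
/*
 * The *_locked variants above expect the caller to already hold the
 * corresponding list lock; the plain versions take and release it
 * themselves.
 */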

/*
 * "Delete" a node. Strictly speaking remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25 = NULL;
	nr_neigh->dev = dev;
	nr_neigh->quality = quality;
	nr_neigh->locked = 1;
	nr_neigh->count = 0;
	nr_neigh->number = nr_neigh_no++;
	nr_neigh->failed = 0;
	atomic_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
		memcpy(nr_neigh->digipeat, ax25_digi, sizeof(*ax25_digi));
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL)
		return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}
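
/*
 * A route with an obs_count of zero is a locked entry that nr_dec_obs()
 * below must never expire; dynamically learned routes start from the
 * obsolescence count initialiser and are dropped once the count reaches
 * zero.
 */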

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via it.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node *s;
	struct hlist_node *node, *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
				case 1:
					s->routes[1] = s->routes[2];
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *node, *nodet, *node2, *node2t;
	struct nr_node *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
						case 1:
							t->routes[1] = t->routes[2];
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the device given is a valid AX.25 interface that is "up".
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	read_unlock(&dev_base_lock);

	return first;
}

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
out:
	read_unlock(&dev_base_lock);
	return dev;
}
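
/*
 * nr_call_to_digi() below hands back a pointer to a single static
 * buffer, so it is not reentrant; in this file it is only used from
 * the ioctl path.
 */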

static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
{
	static ax25_digi ax25_digi;
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		ax25_digi.calls[i] = digipeaters[i];
		ax25_digi.repeated[i] = 0;
	}

	ax25_digi.ndigi = ndigis;
	ax25_digi.lastrepeat = -1;

	return &ax25_digi;
}

/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
			dev_put(dev);
			return -EINVAL;
		}
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
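
/*
 * Userspace drives this table with SIOCADDRT/SIOCDELRT on a NET/ROM
 * socket.  Illustrative sketch only (the socket fd and device name are
 * hypothetical, and nr.callsign/nr.neighbour must be filled in with
 * real AX.25 addresses before use):
 *
 *	struct nr_route_struct nr;
 *
 *	memset(&nr, 0, sizeof(nr));
 *	nr.type = NETROM_NEIGH;
 *	strcpy(nr.device, "ax0");
 *	nr.quality = 192;
 *	ioctl(fd, SIOCADDRT, &nr);
 */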

/*
 * A level 2 link has timed out, so it appears to be a poor link; don't
 * use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct hlist_node *node;
	struct nr_node *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, node, &nr_neigh_list)
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL)
		nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
			    ax25->ax25_dev->dev, 0,
			    sysctl_netrom_obsolescence_count_initialiser);

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}
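
	/*
	 * NET/ROM network header: bytes 0-6 carry the source callsign,
	 * bytes 7-13 the destination callsign and byte 14 the remaining
	 * time-to-live, which is decremented below before forwarding.
	 */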

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr,
				&nr_neigh->callsign,
				nr_neigh->digipeat, nr_neigh->dev);
	if (nr_neigh->ax25 && ax25s) {
		/* We were already holding this ax25_cb */
		ax25_cb_put(ax25s);
	}
	nr_neigh->ax25 = ax25s;

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return ret;
}

#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_node *nr_node;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_node_for_each(nr_node, node, &nr_node_list) {
		if (i == *pos)
			return nr_node;
		++i;
	}

	return NULL;
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_node_list.first
		: ((struct nr_node *)v)->node_node.next;

	return hlist_entry(node, struct nr_node, node_node);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = v;
		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(&nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
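
/*
 * The neighbour table below is exported through the same seq_file
 * pattern as the node table above; both files are registered under
 * /proc/net by the NET/ROM initialisation code.
 */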

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_neigh_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
		if (i == *pos)
			return nr_neigh;
		++i;
	}
	return NULL;
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_neigh_list.first
		: ((struct nr_neigh *)v)->neigh_node.next;

	return hlist_entry(node, struct nr_neigh, neigh_node);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh = v;

		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			nr_neigh->number,
			ax2asc(&nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(&nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node *t = NULL;
	struct hlist_node *node, *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}