/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * ROSE network devices are virtual network devices encapsulating ROSE
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key rose_netdev_xmit_lock_key;

/*
 *	Convert a ROSE address into text.
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
						addr->rose_addr[1] & 0xFF,
						addr->rose_addr[2] & 0xFF,
						addr->rose_addr[3] & 0xFF,
						addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 *	Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 *	Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	unsigned int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 *	Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 *	Kill all bound sockets on a broken link layer connection to a
 *	particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			/* Sockets bound but never connected have no neighbour */
			if (rose->neighbour)
				rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 *	Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 *	Find a socket that wants to accept the Call Request we just
 *	received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 *	Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 *	Find a unique LCI for a given device.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 *	Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 *	Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		setup_timer(&sk->sk_timer, rose_destroy_timer,
			    (unsigned long)sk);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to a
 *	ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ?
		-EFAULT : 0;
}

static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name	  = "ROSE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops       = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}

static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type     = osk->sk_type;
	sk->sk_socket   = osk->sk_socket;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf   = osk->sk_rcvbuf;
	sk->sk_sndbuf   = osk->sk_sndbuf;
	sk->sk_state    = TCP_ESTABLISHED;
	sk->sk_sleep    = osk->sk_sleep;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose          = rose_sk(osk);
	rose->t1       = orose->t1;
	rose->t2       = orose->t2;
	rose->t3       = orose->t3;
	rose->hb       = orose->hb;
	rose->idle     = orose->idle;
	rose->defer    = orose->defer;
	rose->device   = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state     = ROSE_STATE_2;
		sk->sk_state    = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

	user = ax25_findbyuid(current->euid);
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr   = addr->srose_addr;
	rose->device        = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n, err = 0;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during a ERESTARTSYS event */
		sock->state = SS_CONNECTED;
		goto out_release;
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out_release;
	}

	sk->sk_state = TCP_CLOSE;
	sock->state  = SS_UNCONNECTED;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic);
	if (!rose->neighbour) {
		/* Don't return with the socket still locked */
		err = -ENETUNREACH;
		goto out_release;
	}

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci) {
		err = -ENETUNREACH;
		goto out_release;
	}

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL) {
			err = -ENETUNREACH;
			goto out_release;
		}

		user = ax25_findbyuid(current->euid);
		if (!user) {
			err = -EINVAL;
			goto out_release;
		}

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device      = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}
rose_try_next_neigh:
	rose->dest_addr   = addr->srose_addr;
	rose->dest_call   = addr->srose_call;
	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk->sk_sleep, &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Try next neighbour */
		rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic);
		if (rose->neighbour)
			goto rose_try_next_neigh;

		/* No more neighbours */
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 *	The write queue this time is holding sockets ready to use
	 *	hooked into the SABM we saved
	 */
	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk->sk_sleep, &wait);
	if (err)
		goto out_release;

	newsk = skb->sk;
	newsk->sk_socket = newsock;
	newsk->sk_sleep  = &newsock->wait;

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->sk = newsk;

out_release:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->dest_addr;
		srose->srose_call   = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->source_addr;
		srose->srose_call   = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

/*
 *	Handle an incoming Call Request from a neighbour: find a listening
 *	socket willing to accept it, build the new connected socket and
 *	queue it on the listener for accept().
 */
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL;		/* Initially we don't know who it's for */
	/*
	 *	skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	len  = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk        = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose      = rose_sk(make);

	make_rose->lci           = lci;
	make_rose->dest_addr     = facilities.dest_addr;
	make_rose->dest_call     = facilities.dest_call;
	make_rose->dest_ndigis   = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr   = facilities.source_addr;
	make_rose->source_call   = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour     = neigh;
	make_rose->device        = dev;
	make_rose->facilities    = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs        = 0;
	make_rose->va        = 0;
	make_rose->vr        = 0;
	make_rose->vl        = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr   = rose->dest_addr;
		srose.srose_call   = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");

	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 *	Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 *	If the Q BIT Include socket option is in force, the first
	 *	byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 *	Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk   = sk;
			skbn->free = 1;
			skbn->arp  = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ?
				skb->len : ROSE_PACLEN;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);	/* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}


static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	if (rose->qbitincl) {
		asmptr  = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->dest_addr;
		srose->srose_call   = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}


static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;
		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *) argp);

	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *) argp);

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause      = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause      = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);

		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->va        = 0;
			rose->vr        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ?
		sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   atomic_read(&s->sk_wmem_alloc),
			   atomic_read(&s->sk_rmem_alloc),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static const struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family		= PF_ROSE,
	.create		= rose_create,
	.owner		= THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family		= PF_ROSE,
	.owner		= THIS_MODULE,
	.release	= rose_release,
	.bind		= rose_bind,
	.connect	= rose_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= rose_accept,
	.getname	= rose_getname,
	.poll		= datagram_poll,
	.ioctl		= rose_ioctl,
	.listen		= rose_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= rose_setsockopt,
	.getsockopt	= rose_getsockopt,
	.sendmsg	= rose_sendmsg,
	.recvmsg	= rose_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call	= rose_device_event,
};

static struct net_device **dev_rose;

static struct ax25_protocol rose_pid = {
	.pid	= AX25_P_ROSE,
	.func	= rose_route_frame
};

static struct ax25_linkfail rose_linkfail_notifier = {
	.func	= rose_link_failed
};

static int __init rose_proto_init(void)
{
	int i;
	int rc;

	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); 1537 rc = -EINVAL; 1538 goto out; 1539 } 1540 1541 rc = proto_register(&rose_proto, 0); 1542 if (rc != 0) 1543 goto out; 1544 1545 rose_callsign = null_ax25_address; 1546 1547 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1548 if (dev_rose == NULL) { 1549 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1550 rc = -ENOMEM; 1551 goto out_proto_unregister; 1552 } 1553 1554 for (i = 0; i < rose_ndevs; i++) { 1555 struct net_device *dev; 1556 char name[IFNAMSIZ]; 1557 1558 sprintf(name, "rose%d", i); 1559 dev = alloc_netdev(sizeof(struct net_device_stats), 1560 name, rose_setup); 1561 if (!dev) { 1562 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); 1563 rc = -ENOMEM; 1564 goto fail; 1565 } 1566 rc = register_netdev(dev); 1567 if (rc) { 1568 printk(KERN_ERR "ROSE: netdevice registration failed\n"); 1569 free_netdev(dev); 1570 goto fail; 1571 } 1572 lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key); 1573 dev_rose[i] = dev; 1574 } 1575 1576 sock_register(&rose_family_ops); 1577 register_netdevice_notifier(&rose_dev_notifier); 1578 1579 ax25_register_pid(&rose_pid); 1580 ax25_linkfail_register(&rose_linkfail_notifier); 1581 1582 #ifdef CONFIG_SYSCTL 1583 rose_register_sysctl(); 1584 #endif 1585 rose_loopback_init(); 1586 1587 rose_add_loopback_neigh(); 1588 1589 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); 1590 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); 1591 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); 1592 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); 1593 out: 1594 return rc; 1595 fail: 1596 while (--i >= 0) { 1597 unregister_netdev(dev_rose[i]); 1598 free_netdev(dev_rose[i]); 1599 } 1600 kfree(dev_rose); 1601 out_proto_unregister: 1602 proto_unregister(&rose_proto); 1603 goto out; 1604 } 1605 module_init(rose_proto_init); 1606 1607 module_param(rose_ndevs, int, 0); 1608 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); 1609 1610 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); 1611 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); 1612 MODULE_LICENSE("GPL"); 1613 MODULE_ALIAS_NETPROTO(PF_ROSE); 1614 1615 static void __exit rose_exit(void) 1616 { 1617 int i; 1618 1619 proc_net_remove(&init_net, "rose"); 1620 proc_net_remove(&init_net, "rose_neigh"); 1621 proc_net_remove(&init_net, "rose_nodes"); 1622 proc_net_remove(&init_net, "rose_routes"); 1623 rose_loopback_clear(); 1624 1625 rose_rt_free(); 1626 1627 ax25_protocol_release(AX25_P_ROSE); 1628 ax25_linkfail_release(&rose_linkfail_notifier); 1629 1630 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) 1631 ax25_listen_release(&rose_callsign, NULL); 1632 1633 #ifdef CONFIG_SYSCTL 1634 rose_unregister_sysctl(); 1635 #endif 1636 unregister_netdevice_notifier(&rose_dev_notifier); 1637 1638 sock_unregister(PF_ROSE); 1639 1640 for (i = 0; i < rose_ndevs; i++) { 1641 struct net_device *dev = dev_rose[i]; 1642 1643 if (dev) { 1644 unregister_netdev(dev); 1645 free_netdev(dev); 1646 } 1647 } 1648 1649 kfree(dev_rose); 1650 proto_unregister(&rose_proto); 1651 } 1652 1653 module_exit(rose_exit); 1654