/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * ROSE network devices are virtual network devices encapsulating ROSE
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;

static void rose_set_lockdep_one(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}

static void rose_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
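
/*
 * A ROSE address is five bytes holding ten address digits, two digits per
 * byte (high nibble first): rose2asc() below prints all ten digits, and
 * rosecmpm() compares addresses a nibble (digit) at a time.  The all-zero
 * address is displayed as "*".
 */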

/*
 * Convert a ROSE address into text.
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
						addr->rose_addr[1] & 0xFF,
						addr->rose_addr[2] & 0xFF,
						addr->rose_addr[3] & 0xFF,
						addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	unsigned int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}
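
/*
 * Listener lookup below is two-pass: first look for a listening socket
 * bound to the destination address with a matching callsign (and no
 * digipeaters), then fall back to a listener bound with the null
 * callsign, which acts as a wildcard.
 */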

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given device.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		setup_timer(&sk->sk_timer, rose_destroy_timer,
			    (unsigned long)sk);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}
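
/*
 * Socket option housekeeping: ROSE_T1, ROSE_T2, ROSE_T3 and ROSE_HOLDBACK
 * are set in seconds and ROSE_IDLE in minutes; all are stored internally
 * in jiffies.  ROSE_DEFER and ROSE_QBITINCL are boolean flags.
 */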

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
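
/*
 * listen() clears any leftover destination state and marks the socket
 * TCP_LISTEN so rose_find_listener() will offer it incoming Call
 * Requests; the backlog is recorded in sk_max_ack_backlog.
 */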
static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name	  = "ROSE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}

static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type     = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf   = osk->sk_rcvbuf;
	sk->sk_sndbuf   = osk->sk_sndbuf;
	sk->sk_state    = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1       = orose->t1;
	rose->t2       = orose->t2;
	rose->t3       = orose->t3;
	rose->hb       = orose->hb;
	rose->idle     = orose->idle;
	rose->defer    = orose->defer;
	rose->device   = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}
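
/*
 * Releasing a socket: an idle socket (state 0) or one already being
 * cleared (state 2) is torn down immediately; an active connection
 * sends a CLEAR REQUEST, drops to state 2 with SOCK_DESTROY set and is
 * destroyed later, once the clear handshake finishes or T3 expires.
 */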
static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

	user = ax25_findbyuid(current->euid);
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr   = addr->srose_addr;
	rose->device        = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}
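
/*
 * connect() below resolves a route to the destination, allocates an LCI
 * on the chosen neighbour, autobinds an unbound socket to the first ROSE
 * device and the caller's AX.25 UID callsign, then sends a CALL REQUEST.
 * Unless O_NONBLOCK is set it sleeps until the call is accepted, cleared
 * or times out, retrying further neighbours when the first one fails.
 */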
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n, err = 0;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during an ERESTARTSYS event */
		sock->state = SS_CONNECTED;
		goto out_release;
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out_release;
	}

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic, 0);
	if (!rose->neighbour) {
		err = -ENETUNREACH;
		goto out_release;
	}

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci) {
		err = -ENETUNREACH;
		goto out_release;
	}

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL) {
			err = -ENETUNREACH;
			goto out_release;
		}

		user = ax25_findbyuid(current->euid);
		if (!user) {
			err = -EINVAL;
			goto out_release;
		}

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}
rose_try_next_neigh:
	rose->dest_addr   = addr->srose_addr;
	rose->dest_call   = addr->srose_call;
	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}
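
	/*
	 * Blocking connect: sleep interruptibly until the state machine
	 * leaves TCP_SYN_SENT, dropping the socket lock around schedule()
	 * so the receive path can advance the call.  A pending signal gets
	 * us out with -ERESTARTSYS.
	 */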

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk->sk_sleep, &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Try next neighbour */
		rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
		if (rose->neighbour)
			goto rose_try_next_neigh;

		/* No more neighbours */
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 * The write queue this time is holding sockets ready to use
	 * hooked into the SABM we saved
	 */
	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk->sk_sleep, &wait);
	if (err)
		goto out_release;

	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;

out_release:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->dest_addr;
		srose->srose_call   = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->source_addr;
		srose->srose_call   = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	len  = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk        = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose      = rose_sk(make);

	make_rose->lci           = lci;
	make_rose->dest_addr     = facilities.dest_addr;
	make_rose->dest_call     = facilities.dest_call;
	make_rose->dest_ndigis   = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr   = facilities.source_addr;
	make_rose->source_call   = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour     = neigh;
	make_rose->device        = dev;
	make_rose->facilities    = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs        = 0;
	make_rose->va        = 0;
	make_rose->vr        = 0;
	make_rose->vl        = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr   = rose->dest_addr;
		srose.srose_call   = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}
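
	/*
	 * When M_BIT support is compiled in, a payload longer than
	 * ROSE_PACLEN is split into a chain of packets, each given its own
	 * copy of the ROSE header, with the M bit set on every fragment
	 * but the last.
	 */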
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk   = sk;
			skbn->free = 1;
			skbn->arp  = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn);	/* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);	/* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}

static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	if (rose->qbitincl) {
		asmptr  = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->dest_addr;
		srose->srose_call   = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}
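
/*
 * ROSE-specific ioctls: SIOCRSGCAUSE/SIOCRSSCAUSE read and set the stored
 * clear cause and diagnostic, SIOCRSSL2CALL/SIOCRSGL2CALL manage the
 * AX.25 callsign the ROSE layer listens on, SIOCRSACCEPT accepts a call
 * held in the deferred state, and the routing table ioctls require
 * CAP_NET_ADMIN.
 */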
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;
		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *) argp);

	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *) argp);

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause      = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause      = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);

		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->va        = 0;
			rose->vr        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}
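
/*
 * One /proc/net/rose line per socket, matching the header emitted for
 * SEQ_START_TOKEN below: addresses and callsigns, device, LCI, neighbour
 * number, state and sequence variables, then the timers (in seconds,
 * idle time in minutes), the socket queue sizes and the inode.
 */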
static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   atomic_read(&s->sk_wmem_alloc),
			   atomic_read(&s->sk_rmem_alloc),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static const struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family     = PF_ROSE,
	.owner      = THIS_MODULE,
	.release    = rose_release,
	.bind       = rose_bind,
	.connect    = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept     = rose_accept,
	.getname    = rose_getname,
	.poll       = datagram_poll,
	.ioctl      = rose_ioctl,
	.listen     = rose_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg    = rose_sendmsg,
	.recvmsg    = rose_recvmsg,
	.mmap       = sock_no_mmap,
	.sendpage   = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static struct ax25_protocol rose_pid = {
	.pid	= AX25_P_ROSE,
	.func	= rose_route_frame
};

static struct ax25_linkfail rose_linkfail_notifier = {
	.func	= rose_link_failed
};
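
/*
 * Module initialisation: register the protocol, create the rose%d virtual
 * network devices, hook into AX.25 (protocol PID and link failure
 * notifier), then bring up sysctl, the loopback handling and the
 * /proc/net entries.  On failure, devices registered so far are unwound
 * again.
 */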
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); 1558 rc = -EINVAL; 1559 goto out; 1560 } 1561 1562 rc = proto_register(&rose_proto, 0); 1563 if (rc != 0) 1564 goto out; 1565 1566 rose_callsign = null_ax25_address; 1567 1568 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1569 if (dev_rose == NULL) { 1570 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1571 rc = -ENOMEM; 1572 goto out_proto_unregister; 1573 } 1574 1575 for (i = 0; i < rose_ndevs; i++) { 1576 struct net_device *dev; 1577 char name[IFNAMSIZ]; 1578 1579 sprintf(name, "rose%d", i); 1580 dev = alloc_netdev(sizeof(struct net_device_stats), 1581 name, rose_setup); 1582 if (!dev) { 1583 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); 1584 rc = -ENOMEM; 1585 goto fail; 1586 } 1587 rc = register_netdev(dev); 1588 if (rc) { 1589 printk(KERN_ERR "ROSE: netdevice registration failed\n"); 1590 free_netdev(dev); 1591 goto fail; 1592 } 1593 rose_set_lockdep_key(dev); 1594 dev_rose[i] = dev; 1595 } 1596 1597 sock_register(&rose_family_ops); 1598 register_netdevice_notifier(&rose_dev_notifier); 1599 1600 ax25_register_pid(&rose_pid); 1601 ax25_linkfail_register(&rose_linkfail_notifier); 1602 1603 #ifdef CONFIG_SYSCTL 1604 rose_register_sysctl(); 1605 #endif 1606 rose_loopback_init(); 1607 1608 rose_add_loopback_neigh(); 1609 1610 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); 1611 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); 1612 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); 1613 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); 1614 out: 1615 return rc; 1616 fail: 1617 while (--i >= 0) { 1618 unregister_netdev(dev_rose[i]); 1619 free_netdev(dev_rose[i]); 1620 } 1621 kfree(dev_rose); 1622 out_proto_unregister: 1623 proto_unregister(&rose_proto); 1624 goto out; 1625 } 1626 module_init(rose_proto_init); 1627 1628 module_param(rose_ndevs, int, 0); 1629 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); 1630 1631 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); 1632 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); 1633 MODULE_LICENSE("GPL"); 1634 MODULE_ALIAS_NETPROTO(PF_ROSE); 1635 1636 static void __exit rose_exit(void) 1637 { 1638 int i; 1639 1640 proc_net_remove(&init_net, "rose"); 1641 proc_net_remove(&init_net, "rose_neigh"); 1642 proc_net_remove(&init_net, "rose_nodes"); 1643 proc_net_remove(&init_net, "rose_routes"); 1644 rose_loopback_clear(); 1645 1646 rose_rt_free(); 1647 1648 ax25_protocol_release(AX25_P_ROSE); 1649 ax25_linkfail_release(&rose_linkfail_notifier); 1650 1651 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) 1652 ax25_listen_release(&rose_callsign, NULL); 1653 1654 #ifdef CONFIG_SYSCTL 1655 rose_unregister_sysctl(); 1656 #endif 1657 unregister_netdevice_notifier(&rose_dev_notifier); 1658 1659 sock_unregister(PF_ROSE); 1660 1661 for (i = 0; i < rose_ndevs; i++) { 1662 struct net_device *dev = dev_rose[i]; 1663 1664 if (dev) { 1665 unregister_netdev(dev); 1666 free_netdev(dev); 1667 } 1668 } 1669 1670 kfree(dev_rose); 1671 proto_unregister(&rose_proto); 1672 } 1673 1674 module_exit(rose_exit); 1675