/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor		Started coding.
 *	X.25 002	Jonathan Naylor		Centralised disconnect handling.
 *						New timer architecture.
 *	2000-03-11	Henner Eisen		MSG_EOR handling more POSIX compliant.
 *	2000-03-22	Daniela Squassoni	Allowed disabling/enabling of
 *						facilities negotiation and increased
 *						the throughput upper limit.
 *	2000-08-27	Arnaldo C. Melo		s/suser/capable/ + micro cleanups
 *	2000-09-04	Henner Eisen		Set sock->state in x25_accept().
 *						Fixed x25_output() related skb leakage.
 *	2000-10-02	Henner Eisen		Made x25_kick() single threaded per socket.
 *	2000-10-27	Henner Eisen		MSG_DONTWAIT for fragment allocation.
 *	2000-11-14	Henner Eisen		Closing datalink from NETDEV_GOING_DOWN
 *	2002-10-06	Arnaldo C. Melo		Get rid of cli/sti, move proc stuff to
 *						x25_proc.c, using seq_file
 *	2005-04-02	Shaun Pereira		Selective sub address matching
 *						with call user data
 *	2005-04-15	Shaun Pereira		Fast select with no restriction on
 *						response
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ctype.h>

#include <net/x25.h>
#include <net/compat.h>

int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout    = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout   = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout   = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout    = X25_DEFAULT_T2;
int sysctl_x25_forward                 = 0;

HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

static const struct proto_ops x25_proto_ops;

static struct x25_address null_x25_address = {" "};

#ifdef CONFIG_COMPAT
struct compat_x25_subscrip_struct {
	char device[200 - sizeof(compat_ulong_t)];
	compat_ulong_t global_facil_mask;
	compat_uint_t extended;
};
#endif


int x25_parse_address_block(struct sk_buff *skb,
			    struct x25_address *called_addr,
			    struct x25_address *calling_addr)
{
	unsigned char len;
	int needed;
	int rc;

	if (skb->len < 1) {
		/* packet has no address block */
		rc = 0;
		goto empty;
	}

	len = *skb->data;
	needed = 1 + (len >> 4) + (len & 0x0f);

	if (skb->len < needed) {
		/* packet is too short to hold the addresses it claims
		   to hold */
		rc = -1;
		goto empty;
	}

	return x25_addr_ntoa(skb->data, called_addr, calling_addr);

empty:
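	/*
	 * No usable address block: return empty (zero length) called and
	 * calling addresses to the caller.
	 */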
	*called_addr->x25_addr  = 0;
	*calling_addr->x25_addr = 0;

	return rc;
}


int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	unsigned int i;

	called_len  = (*p >> 0) & 0x0F;
	calling_len = (*p >> 4) & 0x0F;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;
	p++;

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {
				*called++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*called++ = ((*p >> 4) & 0x0F) + '0';
			}
		} else {
			if (i % 2 != 0) {
				*calling++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*calling++ = ((*p >> 4) & 0x0F) + '0';
			}
		}
	}

	*called = *calling = '\0';

	return 1 + (called_len + calling_len + 1) / 2;
}

int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	int i;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;

	called_len  = strlen(called);
	calling_len = strlen(calling);

	*p++ = (calling_len << 4) | (called_len << 0);

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {
				*p |= (*called++ - '0') << 0;
				p++;
			} else {
				*p = 0x00;
				*p |= (*called++ - '0') << 4;
			}
		} else {
			if (i % 2 != 0) {
				*p |= (*calling++ - '0') << 0;
				p++;
			} else {
				*p = 0x00;
				*p |= (*calling++ - '0') << 4;
			}
		}
	}

	return 1 + (called_len + calling_len + 1) / 2;
}

/*
 *	Socket removal during an interrupt is now safe.
 */
static void x25_remove_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}

/*
 *	Handle device status changes.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct x25_neigh *nb;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	 || dev->type == ARPHRD_ETHER
#endif
	 ) {
		switch (event) {
		case NETDEV_UP:
			x25_link_device_up(dev);
			break;
		case NETDEV_GOING_DOWN:
			nb = x25_get_neigh(dev);
			if (nb) {
				x25_terminate_link(nb);
				x25_neigh_put(nb);
			}
			break;
		case NETDEV_DOWN:
			x25_kill_by_device(dev);
			x25_route_device_down(dev);
			x25_link_device_down(dev);
			break;
		}
	}

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.
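 *	Takes x25_list_lock itself, so callers must not already hold it.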
 */
static void x25_insert_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
				      struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			     x25_sk(s)->source_addr.x25_addr) ||
		     !strcmp(addr->x25_addr,
			     null_x25_address.x25_addr)) &&
		    s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this socket's call user data
			 */
			if (skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if ((memcmp(x25_sk(s)->calluserdata.cuddata,
					    skb->data,
					    x25_sk(s)->cudmatchlength)) == 0) {
					sock_hold(s);
					goto found;
				}
			} else
				next_best = s;
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;
}

/*
 *	Find a connected X.25 socket given my LCI and neighbour.
 */
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
			sock_hold(s);
			goto found;
		}
	s = NULL;
found:
	return s;
}

struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;

	read_lock_bh(&x25_list_lock);
	s = __x25_find_socket(lci, nb);
	read_unlock_bh(&x25_list_lock);
	return s;
}

/*
 *	Find a unique LCI for a given device.
 */
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
	unsigned int lci = 1;
	struct sock *sk;

	read_lock_bh(&x25_list_lock);

	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
		sock_put(sk);
		if (++lci == 4096) {
			lci = 0;
			break;
		}
	}

	read_unlock_bh(&x25_list_lock);
	return lci;
}

/*
 *	Deferred destroy.
 */
static void __x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills.
 */
static void x25_destroy_timer(unsigned long data)
{
	x25_destroy_socket_from_timer((struct sock *)data);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	The destroy timer reaches this via x25_destroy_socket_from_timer().
 */
static void __x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			skb->sk->sk_state = TCP_LISTEN;
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data     = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}
}

void x25_destroy_socket_from_timer(struct sock *sk)
{
	sock_hold(sk);
	bh_lock_sock(sk);
	__x25_destroy_socket(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void x25_destroy_socket(struct sock *sk)
{
	sock_hold(sk);
	lock_sock(sk);
	__x25_destroy_socket(sk);
	release_sock(sk);
	sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to an
 *	X.25 socket object.
 */

static int x25_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	int opt;
	struct sock *sk = sock->sk;
	int rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EINVAL;
	if (optlen < sizeof(int))
		goto out;

	rc = -EFAULT;
	if (get_user(opt, (int __user *)optval))
		goto out;

	if (opt)
		set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	else
		clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	rc = 0;
out:
	return rc;
}

static int x25_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int val, len, rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EFAULT;
	if (get_user(len, optlen))
		goto out;

	len = min_t(unsigned int, len, sizeof(int));

	rc = -EINVAL;
	if (len < 0)
		goto out;

	rc = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
	return rc;
}

static int x25_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int rc = -EOPNOTSUPP;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		rc = 0;
	}
	release_sock(sk);

	return rc;
}

static struct proto x25_proto = {
	.name	  = "X25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};

static struct sock *x25_alloc_socket(struct net *net)
{
	struct x25_sock *x25;
	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);

	if (!sk)
		goto out;

	sock_init_data(NULL, sk);

	x25 = x25_sk(sk);
	skb_queue_head_init(&x25->ack_queue);
	skb_queue_head_init(&x25->fragment_queue);
	skb_queue_head_init(&x25->interrupt_in_queue);
	skb_queue_head_init(&x25->interrupt_out_queue);
out:
	return sk;
}

static int x25_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct x25_sock *x25;
	int rc = -EAFNOSUPPORT;

	if (!net_eq(net, &init_net))
		goto out;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_SEQPACKET)
		goto out;

	rc = -EINVAL;
	if (protocol)
		goto out;

	rc = -ENOBUFS;
	if ((sk = x25_alloc_socket(net)) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sock_init_data(sock, sk);

	x25_init_timers(sk);

	sock->ops = &x25_proto_ops;
	sk->sk_protocol = protocol;
	sk->sk_backlog_rcv = x25_backlog_rcv;

	x25->t21   = sysctl_x25_call_request_timeout;
	x25->t22   = sysctl_x25_reset_request_timeout;
	x25->t23   = sysctl_x25_clear_request_timeout;
	x25->t2    = sysctl_x25_ack_holdback_timeout;
	x25->state = X25_STATE_0;
	x25->cudmatchlength = 0;
	set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);	/* normally no cud  */
							/* on call accept   */

	x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.throughput  = 0;	/* by default don't negotiate
						   throughput */
	x25->facilities.reverse     = X25_DEFAULT_REVERSE;
	x25->dte_facilities.calling_len = 0;
	x25->dte_facilities.called_len  = 0;
	memset(x25->dte_facilities.called_ae, '\0',
	       sizeof(x25->dte_facilities.called_ae));
	memset(x25->dte_facilities.calling_ae, '\0',
	       sizeof(x25->dte_facilities.calling_ae));

	rc = 0;
out:
	return rc;
}

static struct sock *x25_make_new(struct sock *osk)
{
	struct sock *sk = NULL;
	struct x25_sock *x25, *ox25;

	if (osk->sk_type != SOCK_SEQPACKET)
		goto out;

	if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sk->sk_type        = osk->sk_type;
	sk->sk_priority    = osk->sk_priority;
	sk->sk_protocol    = osk->sk_protocol;
	sk->sk_rcvbuf      = osk->sk_rcvbuf;
	sk->sk_sndbuf      = osk->sk_sndbuf;
	sk->sk_state       = TCP_ESTABLISHED;
	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
	sock_copy_flags(sk, osk);

	ox25 = x25_sk(osk);
	x25->t21            = ox25->t21;
	x25->t22            = ox25->t22;
	x25->t23            = ox25->t23;
	x25->t2             = ox25->t2;
	x25->flags          = ox25->flags;
	x25->facilities     = ox25->facilities;
	x25->dte_facilities = ox25->dte_facilities;
	x25->cudmatchlength = ox25->cudmatchlength;

	clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
	x25_init_timers(sk);
out:
	return sk;
}

static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	lock_kernel();
	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {

	case X25_STATE_0:
	case X25_STATE_2:
		x25_disconnect(sk, 0, 0, 0);
		x25_destroy_socket(sk);
		goto out;

	case X25_STATE_1:
	case X25_STATE_3:
	case X25_STATE_4:
		x25_clear_queues(sk);
		x25_write_internal(sk, X25_CLEAR_REQUEST);
		x25_start_t23timer(sk);
		x25->state = X25_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;
	}

	sock_orphan(sk);
out:
	unlock_kernel();
	return 0;
}

static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	int len, i, rc = 0;

	if (!sock_flag(sk, SOCK_ZAPPED) ||
	    addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25) {
		rc = -EINVAL;
		goto out;
	}

	len = strlen(addr->sx25_addr.x25_addr);
	for (i = 0; i < len; i++) {
		if (!isdigit(addr->sx25_addr.x25_addr[i])) {
			rc = -EINVAL;
			goto out;
		}
	}

	lock_sock(sk);
	x25_sk(sk)->source_addr = addr->sx25_addr;
	x25_insert_socket(sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	release_sock(sk);
	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
	return rc;
}

static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	struct x25_route *rt;
	int rc = 0;

	lock_sock(sk);
	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		goto out;	/* Connect completed during an ERESTARTSYS event */
	}

	rc = -ECONNREFUSED;
	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		goto out;
	}

	rc = -EISCONN;	/* No reconnect on a seqpacket socket */
	if (sk->sk_state == TCP_ESTABLISHED)
		goto out;

	sk->sk_state = TCP_CLOSE;
	sock->state  = SS_UNCONNECTED;

	rc = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25)
		goto out;

	rc = -ENETUNREACH;
	rt = x25_get_route(&addr->sx25_addr);
	if (!rt)
		goto out;

	x25->neighbour = x25_get_neigh(rt->dev);
	if (!x25->neighbour)
		goto out_put_route;

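	/*
	 * Clamp our requested facilities to what this neighbour/link
	 * supports before building the Call Request.
	 */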
	x25_limit_facilities(&x25->facilities, x25->neighbour);

	x25->lci = x25_new_lci(x25->neighbour);
	if (!x25->lci)
		goto out_put_neigh;

	rc = -EINVAL;
	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
		goto out_put_neigh;

	if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
		memset(&x25->source_addr, '\0', X25_ADDR_LEN);

	x25->dest_addr = addr->sx25_addr;

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	x25->state = X25_STATE_1;

	x25_write_internal(sk, X25_CALL_REQUEST);

	x25_start_heartbeat(sk);
	x25_start_t21timer(sk);

	/* Now the loop */
	rc = -EINPROGRESS;
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		goto out_put_neigh;

	rc = x25_wait_for_connection_establishment(sk);
	if (rc)
		goto out_put_neigh;

	sock->state = SS_CONNECTED;
	rc = 0;
out_put_neigh:
	if (rc)
		x25_neigh_put(x25->neighbour);
out_put_route:
	x25_route_put(rt);
out:
	release_sock(sk);
	return rc;
}

static int x25_wait_for_data(struct sock *sk, long timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	struct sk_buff *skb;
	int rc = -EINVAL;

	if (!sk)
		goto out;

	rc = -EOPNOTSUPP;
	if (sk->sk_type != SOCK_SEQPACKET)
		goto out;

	lock_sock(sk);
	rc = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out2;

	rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
	if (rc)
		goto out2;
	skb = skb_dequeue(&sk->sk_receive_queue);
	rc = -EINVAL;
	if (!skb->sk)
		goto out2;
	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->state = SS_CONNECTED;
	rc = 0;
out2:
	release_sock(sk);
out:
	return rc;
}

static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
		       int *uaddr_len, int peer)
{
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	int rc = 0;

	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			rc = -ENOTCONN;
			goto out;
		}
		sx25->sx25_addr = x25->dest_addr;
	} else
		sx25->sx25_addr = x25->source_addr;

	sx25->sx25_family = AF_X25;
	*uaddr_len = sizeof(*sx25);

out:
	return rc;
}

int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
			unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct x25_sock *makex25;
	struct x25_address source_addr, dest_addr;
	struct x25_facilities facilities;
	struct x25_dte_facilities dte_facilities;
	int len, addr_len, rc;

	/*
	 *	Remove the LCI and frame type.
	 */
	skb_pull(skb, X25_STD_MIN_LEN);

	/*
	 *	Extract the X.25 addresses and convert them to ASCII strings,
	 *	and remove them.
	 *
	 *	Address block is mandatory in call request packets
	 */
	addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
	if (addr_len <= 0)
		goto out_clear_request;
	skb_pull(skb, addr_len);

	/*
	 *	Get the length of the facilities, skip past them for the moment
	 *	get the call user data because this is needed to determine
	 *	the correct listener
	 *
	 *	Facilities length is mandatory in call request packets
	 */
	if (skb->len < 1)
		goto out_clear_request;
	len = skb->data[0] + 1;
	if (skb->len < len)
		goto out_clear_request;
	skb_pull(skb, len);

	/*
	 *	Find a listener for the particular address/cud pair.
	 */
	sk = x25_find_listener(&source_addr, skb);
	skb_push(skb, len);

	if (sk != NULL && sk_acceptq_is_full(sk)) {
		goto out_sock_put;
	}

	/*
	 *	We don't have any listeners for this incoming call.
	 *	Try forwarding it.
	 */
	if (sk == NULL) {
		skb_push(skb, addr_len + X25_STD_MIN_LEN);
		if (sysctl_x25_forward &&
		    x25_forward_call(&dest_addr, nb, skb, lci) > 0) {
			/* Call was forwarded, don't process it any more */
			kfree_skb(skb);
			rc = 1;
			goto out;
		} else {
			/* No listeners, can't forward, clear the call */
			goto out_clear_request;
		}
	}

	/*
	 *	Try to reach a compromise on the requested facilities.
	 */
	len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
	if (len == -1)
		goto out_sock_put;

	/*
	 * current neighbour/link might impose additional limits
	 * on certain facilities
	 */

	x25_limit_facilities(&facilities, nb);

	/*
	 *	Try to create a new socket.
	 */
	make = x25_make_new(sk);
	if (!make)
		goto out_sock_put;

	/*
	 *	Remove the facilities
	 */
	skb_pull(skb, len);

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;

	makex25 = x25_sk(make);
	makex25->lci            = lci;
	makex25->dest_addr      = dest_addr;
	makex25->source_addr    = source_addr;
	makex25->neighbour      = nb;
	makex25->facilities     = facilities;
	makex25->dte_facilities = dte_facilities;
	makex25->vc_facil_mask  = x25_sk(sk)->vc_facil_mask;
	/* ensure no reverse facil on accept */
	makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
	/* ensure no calling address extension on accept */
	makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
	makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;

	/* Normally all calls are accepted immediately */
	if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
		x25_write_internal(make, X25_CALL_ACCEPTED);
		makex25->state = X25_STATE_3;
	}

	/*
	 *	Incoming Call User Data.
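	 *	The addresses and facilities have already been pulled above,
	 *	so whatever remains in the skb is the caller's CUD; keep it
	 *	where the accepting application can read it.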
	 */
	skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
	makex25->calluserdata.cudlength = skb->len;

	sk->sk_ack_backlog++;

	x25_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	x25_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	rc = 1;
	sock_put(sk);
out:
	return rc;
out_sock_put:
	sock_put(sk);
out_clear_request:
	rc = 0;
	x25_transmit_clear_request(nb, lci, 0x01);
	goto out;
}

static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
	struct sockaddr_x25 sx25;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	size_t size;
	int qbit = 0, rc = -EINVAL;

	lock_kernel();
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
		goto out;

	/* we currently don't support segmented records at the user interface */
	if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
		goto out;

	rc = -EADDRNOTAVAIL;
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	rc = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		goto out;
	}

	rc = -ENETUNREACH;
	if (!x25->neighbour)
		goto out;

	if (usx25) {
		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(sx25))
			goto out;
		memcpy(&sx25, usx25, sizeof(sx25));
		rc = -EISCONN;
		if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
			goto out;
		rc = -EINVAL;
		if (sx25.sx25_family != AF_X25)
			goto out;
	} else {
		/*
		 *	FIXME 1003.1g - if the socket is like this because
		 *	it has become closed (not started closed) we ought
		 *	to SIGPIPE, EPIPE;
		 */
		rc = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		sx25.sx25_family = AF_X25;
		sx25.sx25_addr   = x25->dest_addr;
	}

	/* Sanity check the packet size */
	if (len > 65535) {
		rc = -EMSGSIZE;
		goto out;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");

	if ((msg->msg_flags & MSG_OOB) && len > 32)
		len = 32;

	size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	if (!skb)
		goto out;
	X25_SKB_CB(skb)->flags = msg->msg_flags;

	skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);

	/*
	 *	Put the data on the end
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (rc)
		goto out_kfree_skb;

	/*
	 *	If the Q BIT Include socket option is in force, the first
	 *	byte of the user data is the logical value of the Q Bit.
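	 *	For example, with X25_QBITINCL set, writing {0x01, 'h', 'i'}
	 *	transmits "hi" with the Q bit set, while a leading 0x00 octet
	 *	transmits it as normal data.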
	 */
	if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 *	Push down the X.25 header
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");

	if (msg->msg_flags & MSG_OOB) {
		if (x25->neighbour->extended) {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		} else {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		}
	} else {
		if (x25->neighbour->extended) {
			/* Build an Extended X.25 header */
			asmptr    = skb_push(skb, X25_EXT_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
			*asmptr++ = X25_DATA;
		} else {
			/* Build a Standard X.25 header */
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
		}

		if (qbit)
			skb->data[0] |= X25_Q_BIT;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
	SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");

	rc = -ENOTCONN;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out_kfree_skb;

	if (msg->msg_flags & MSG_OOB)
		skb_queue_tail(&x25->interrupt_out_queue, skb);
	else {
		rc = x25_output(sk, skb);
		len = rc;
		if (rc < 0)
			kfree_skb(skb);
		else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
			len++;
	}

	/*
	 * lock_sock() is currently only used to serialize this x25_kick()
	 * against input-driven x25_kick() calls. It currently only blocks
	 * incoming packets for this socket and does not protect against
	 * any other socket state changes and is not called from anywhere
	 * else. As x25_kick() cannot block and as long as all socket
	 * operations are BKL-wrapped, we don't need to take care about
	 * purging the backlog queue in x25_release().
	 *
	 * Using lock_sock() to protect all socket operations entirely
	 * (and making the whole x25 stack SMP aware) unfortunately would
	 * require major changes to {send,recv}msg and skb allocation methods.
	 * -> 2.5 ;)
	 */
	lock_sock(sk);
	x25_kick(sk);
	release_sock(sk);
	rc = len;
out:
	unlock_kernel();
	return rc;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}


static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
	size_t copied;
	int qbit;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int rc = -ENOTCONN;

	lock_kernel();
	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
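	 * (With X25_QBITINCL set, each record handed back below is prefixed
	 * with a single octet carrying the received Q bit, 0 or 1.)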
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	if (flags & MSG_OOB) {
		rc = -EINVAL;
		if (sock_flag(sk, SOCK_URGINLINE) ||
		    !skb_peek(&x25->interrupt_in_queue))
			goto out;

		skb = skb_dequeue(&x25->interrupt_in_queue);

		skb_pull(skb, X25_STD_MIN_LEN);

		/*
		 *	No Q bit information on Interrupt data.
		 */
		if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
			asmptr  = skb_push(skb, 1);
			*asmptr = 0x00;
		}

		msg->msg_flags |= MSG_OOB;
	} else {
		/* Now we can treat all alike */
		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &rc);
		if (!skb)
			goto out;

		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

		skb_pull(skb, x25->neighbour->extended ?
				X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

		if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
			asmptr  = skb_push(skb, 1);
			*asmptr = qbit;
		}
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Currently, each datagram always contains a complete record */
	msg->msg_flags |= MSG_EOR;

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto out_free_dgram;

	if (sx25) {
		sx25->sx25_family = AF_X25;
		sx25->sx25_addr   = x25->dest_addr;
	}

	msg->msg_namelen = sizeof(struct sockaddr_x25);

	lock_sock(sk);
	x25_check_rbuf(sk);
	release_sock(sk);
	rc = copied;
out_free_dgram:
	skb_free_datagram(sk, skb);
out:
	unlock_kernel();
	return rc;
}


static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	void __user *argp = (void __user *)arg;
	int rc;

	switch (cmd) {
	case TIOCOUTQ: {
		int amount;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		rc = put_user(amount, (unsigned int __user *)argp);
		break;
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;
		/*
		 * These two are safe on a single CPU system as
		 * only user tasks fiddle here
		 */
		lock_sock(sk);
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		release_sock(sk);
		rc = put_user(amount, (unsigned int __user *)argp);
		break;
	}

	case SIOCGSTAMP:
		rc = -EINVAL;
		if (sk)
			rc = sock_get_timestamp(sk,
						(struct timeval __user *)argp);
		break;
	case SIOCGSTAMPNS:
		rc = -EINVAL;
		if (sk)
			rc = sock_get_timestampns(sk,
						  (struct timespec __user *)argp);
		break;
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		rc = -EINVAL;
		break;
	case SIOCADDRT:
	case SIOCDELRT:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_route_ioctl(cmd, argp);
		break;
	case SIOCX25GSUBSCRIP:
		rc = x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25SSUBSCRIP:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25GFACILITIES: {
		lock_sock(sk);
		rc = copy_to_user(argp, &x25->facilities,
				  sizeof(x25->facilities)) ? -EFAULT : 0;
		release_sock(sk);
		break;
	}

	case SIOCX25SFACILITIES: {
		struct x25_facilities facilities;
		rc = -EFAULT;
		if (copy_from_user(&facilities, argp, sizeof(facilities)))
			break;
		rc = -EINVAL;
		lock_sock(sk);
		if (sk->sk_state != TCP_LISTEN &&
		    sk->sk_state != TCP_CLOSE)
			goto out_fac_release;
		if (facilities.pacsize_in < X25_PS16 ||
		    facilities.pacsize_in > X25_PS4096)
			goto out_fac_release;
		if (facilities.pacsize_out < X25_PS16 ||
		    facilities.pacsize_out > X25_PS4096)
			goto out_fac_release;
		if (facilities.winsize_in < 1 ||
		    facilities.winsize_in > 127)
			goto out_fac_release;
		if (facilities.throughput) {
			int out = facilities.throughput & 0xf0;
			int in  = facilities.throughput & 0x0f;
			if (!out)
				facilities.throughput |=
					X25_DEFAULT_THROUGHPUT << 4;
			else if (out < 0x30 || out > 0xD0)
				goto out_fac_release;
			if (!in)
				facilities.throughput |=
					X25_DEFAULT_THROUGHPUT;
			else if (in < 0x03 || in > 0x0D)
				goto out_fac_release;
		}
		if (facilities.reverse &&
		    (facilities.reverse & 0x81) != 0x81)
			goto out_fac_release;
		x25->facilities = facilities;
		rc = 0;
out_fac_release:
		release_sock(sk);
		break;
	}

	case SIOCX25GDTEFACILITIES: {
		lock_sock(sk);
		rc = copy_to_user(argp, &x25->dte_facilities,
				  sizeof(x25->dte_facilities));
		release_sock(sk);
		if (rc)
			rc = -EFAULT;
		break;
	}

	case SIOCX25SDTEFACILITIES: {
		struct x25_dte_facilities dtefacs;
		rc = -EFAULT;
		if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
			break;
		rc = -EINVAL;
		lock_sock(sk);
		if (sk->sk_state != TCP_LISTEN &&
		    sk->sk_state != TCP_CLOSE)
			goto out_dtefac_release;
		if (dtefacs.calling_len > X25_MAX_AE_LEN)
			goto out_dtefac_release;
		if (dtefacs.calling_ae == NULL)
			goto out_dtefac_release;
		if (dtefacs.called_len > X25_MAX_AE_LEN)
			goto out_dtefac_release;
		if (dtefacs.called_ae == NULL)
			goto out_dtefac_release;
		x25->dte_facilities = dtefacs;
		rc = 0;
out_dtefac_release:
		release_sock(sk);
		break;
	}

	case SIOCX25GCALLUSERDATA: {
		lock_sock(sk);
		rc = copy_to_user(argp, &x25->calluserdata,
				  sizeof(x25->calluserdata)) ? -EFAULT : 0;
		release_sock(sk);
		break;
	}

	case SIOCX25SCALLUSERDATA: {
		struct x25_calluserdata calluserdata;

		rc = -EFAULT;
		if (copy_from_user(&calluserdata, argp, sizeof(calluserdata)))
			break;
		rc = -EINVAL;
		if (calluserdata.cudlength > X25_MAX_CUD_LEN)
			break;
		lock_sock(sk);
		x25->calluserdata = calluserdata;
		release_sock(sk);
		rc = 0;
		break;
	}

	case SIOCX25GCAUSEDIAG: {
		lock_sock(sk);
		rc = copy_to_user(argp, &x25->causediag,
				  sizeof(x25->causediag)) ? -EFAULT : 0;
		release_sock(sk);
		break;
	}

	case SIOCX25SCAUSEDIAG: {
		struct x25_causediag causediag;
		rc = -EFAULT;
		if (copy_from_user(&causediag, argp, sizeof(causediag)))
			break;
		lock_sock(sk);
		x25->causediag = causediag;
		release_sock(sk);
		rc = 0;
		break;
	}

	case SIOCX25SCUDMATCHLEN: {
		struct x25_subaddr sub_addr;
		rc = -EINVAL;
		lock_sock(sk);
		if (sk->sk_state != TCP_CLOSE)
			goto out_cud_release;
		rc = -EFAULT;
		if (copy_from_user(&sub_addr, argp, sizeof(sub_addr)))
			goto out_cud_release;
		rc = -EINVAL;
		if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
			goto out_cud_release;
		x25->cudmatchlength = sub_addr.cudmatchlength;
		rc = 0;
out_cud_release:
		release_sock(sk);
		break;
	}

	case SIOCX25CALLACCPTAPPRV: {
		rc = -EINVAL;
		lock_kernel();
		if (sk->sk_state != TCP_CLOSE)
			break;
		clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
		unlock_kernel();
		rc = 0;
		break;
	}

	case SIOCX25SENDCALLACCPT: {
		rc = -EINVAL;
		lock_kernel();
		if (sk->sk_state != TCP_ESTABLISHED)
			break;
		/* must call accptapprv above */
		if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
			break;
		x25_write_internal(sk, X25_CALL_ACCEPTED);
		x25->state = X25_STATE_3;
		unlock_kernel();
		rc = 0;
		break;
	}

	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	return rc;
}

static const struct net_proto_family x25_family_ops = {
	.family = AF_X25,
	.create = x25_create,
	.owner	= THIS_MODULE,
};

#ifdef CONFIG_COMPAT
static int compat_x25_subscr_ioctl(unsigned int cmd,
		struct compat_x25_subscrip_struct __user *x25_subscr32)
{
	struct compat_x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
		goto out;

	rc = -EINVAL;
	dev = x25_dev_get(x25_subscr.device);
	if (dev == NULL)
		goto out;

	nb = x25_get_neigh(dev);
	if (nb == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(x25_subscr32, &x25_subscr,
				  sizeof(*x25_subscr32)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
			    unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	int rc = -ENOIOCTLCMD;

	switch (cmd) {
	case TIOCOUTQ:
	case TIOCINQ:
		rc = x25_ioctl(sock, cmd, (unsigned long)argp);
		break;
	case SIOCGSTAMP:
		rc = -EINVAL;
		if (sk)
			rc = compat_sock_get_timestamp(sk,
					(struct timeval __user *)argp);
		break;
	case SIOCGSTAMPNS:
		rc = -EINVAL;
		if (sk)
			rc = compat_sock_get_timestampns(sk,
					(struct timespec __user *)argp);
		break;
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		rc = -EINVAL;
		break;
	case SIOCADDRT:
	case SIOCDELRT:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_route_ioctl(cmd, argp);
		break;
	case SIOCX25GSUBSCRIP:
		rc = compat_x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25SSUBSCRIP:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = compat_x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25GFACILITIES:
	case SIOCX25SFACILITIES:
	case SIOCX25GDTEFACILITIES:
	case SIOCX25SDTEFACILITIES:
	case SIOCX25GCALLUSERDATA:
	case SIOCX25SCALLUSERDATA:
	case SIOCX25GCAUSEDIAG:
	case SIOCX25SCAUSEDIAG:
	case SIOCX25SCUDMATCHLEN:
	case SIOCX25CALLACCPTAPPRV:
	case SIOCX25SENDCALLACCPT:
		rc = x25_ioctl(sock, cmd, (unsigned long)argp);
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	return rc;
}
#endif

static const struct proto_ops x25_proto_ops = {
	.family		= AF_X25,
	.owner		= THIS_MODULE,
	.release	= x25_release,
	.bind		= x25_bind,
	.connect	= x25_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= x25_accept,
	.getname	= x25_getname,
	.poll		= datagram_poll,
	.ioctl		= x25_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_x25_ioctl,
#endif
	.listen		= x25_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= x25_setsockopt,
	.getsockopt	= x25_getsockopt,
	.sendmsg	= x25_sendmsg,
	.recvmsg	= x25_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct packet_type x25_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_X25),
	.func	= x25_lapb_receive_frame,
};

static struct notifier_block x25_dev_notifier = {
	.notifier_call = x25_device_event,
};

void x25_kill_by_neigh(struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour == nb)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);

	/* Remove any related forwards */
	x25_clear_forward_by_dev(nb->dev);
}

static int __init x25_init(void)
{
	int rc = proto_register(&x25_proto, 0);

	if (rc != 0)
		goto out;

	rc = sock_register(&x25_family_ops);
	if (rc != 0)
		goto out_proto;

	dev_add_pack(&x25_packet_type);

	rc = register_netdevice_notifier(&x25_dev_notifier);
	if (rc != 0)
		goto out_sock;

	printk(KERN_INFO "X.25 for Linux Version 0.2\n");

	x25_register_sysctl();
	rc = x25_proc_init();
	if (rc != 0)
		goto out_dev;
out:
	return rc;
out_dev:
	unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
	sock_unregister(AF_X25);
out_proto:
	proto_unregister(&x25_proto);
	goto out;
}
module_init(x25_init);

static void __exit x25_exit(void)
{
	x25_proc_exit();
	x25_link_free();
	x25_route_free();

	x25_unregister_sysctl();

	unregister_netdevice_notifier(&x25_dev_notifier);

	dev_remove_pack(&x25_packet_type);

	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
module_exit(x25_exit);

MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);