/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		memcpy(&txmsg.class, skb->data, skb->len >= 4 ?
						4 : skb->len);
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

/* Split a large received message into chunks of at most sk_rcvbuf/4 bytes
 * and queue them on the backlog queue. */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* Receive a single IUCV message into the skb and queue it to the socket,
 * falling back to the backlog queue if the receive queue is full. */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* Process messages that were deferred while the backlog queue was busy */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}

	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += msg->length + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, list_skb->cb, 4)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this)
			kfree_skb(this);
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}

}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);